code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __a ( a, a, a, a=1_0_2_4 ):
"""simple docstring"""
_a , _a = [], []
_a = list(zip(a, a ) )
_a , _a = sorted_examples[0]
def is_too_big(a ):
return tok(a, return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
_a = new_src + " " + src
_a = new_tgt + " " + tgt
if is_too_big(a ) or is_too_big(a ): # cant fit, finalize example
finished_src.append(a )
finished_tgt.append(a )
_a , _a = src, tgt
else: # can fit, keep adding
_a , _a = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(a )
finished_tgt.append(a )
return finished_src, finished_tgt
def __a ( a, a, a, a ):
"""simple docstring"""
_a = Path(a )
save_path.mkdir(exist_ok=a )
for split in ["train"]:
_a , _a = data_dir / F'{split}.source', data_dir / F'{split}.target'
_a = [x.rstrip() for x in Path(a ).open().readlines()]
_a = [x.rstrip() for x in Path(a ).open().readlines()]
_a , _a = pack_examples(a, a, a, a )
print(F'packed {split} split from {len(a )} examples -> {len(a )}.' )
Path(save_path / F'{split}.source' ).open("w" ).write("\n".join(a ) )
Path(save_path / F'{split}.target' ).open("w" ).write("\n".join(a ) )
for split in ["val", "test"]:
_a , _a = data_dir / F'{split}.source', data_dir / F'{split}.target'
shutil.copyfile(a, save_path / F'{split}.source' )
shutil.copyfile(a, save_path / F'{split}.target' )
def __a ( ):
"""simple docstring"""
_a = argparse.ArgumentParser()
parser.add_argument("--tok_name", type=a, help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len", type=a, default=1_2_8 )
parser.add_argument("--data_dir", type=a )
parser.add_argument("--save_path", type=a )
_a = parser.parse_args()
_a = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(a, Path(args.data_dir ), args.max_seq_len, args.save_path )
if __name__ == "__main__":
packer_cli()
| 388
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 388
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a__( snake_case__ , snake_case__ , unittest.TestCase ):
a_ : int = IFImgaImgSuperResolutionPipeline
a_ : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a_ : Optional[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowercase ( self ) -> Dict:
return self._get_superresolution_dummy_components()
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> Optional[int]:
if str(_UpperCAmelCase ).startswith('mps' ):
snake_case__ =torch.manual_seed(_UpperCAmelCase )
else:
snake_case__ =torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
snake_case__ =floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
snake_case__ =floats_tensor((1, 3, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
snake_case__ ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowercase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _lowercase ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _lowercase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowercase ( self ) -> Optional[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowercase ( self ) -> Dict:
self._test_save_load_local()
def _lowercase ( self ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 581
|
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def a ( UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[str]:
def get_masked_lm_array(UpperCamelCase_ : str ):
snake_case__ =f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ =tf.train.load_variable(UpperCamelCase_ , UpperCamelCase_ )
if "kernel" in name:
snake_case__ =array.transpose()
return torch.from_numpy(UpperCamelCase_ )
def get_encoder_array(UpperCamelCase_ : str ):
snake_case__ =f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ =tf.train.load_variable(UpperCamelCase_ , UpperCamelCase_ )
if "kernel" in name:
snake_case__ =array.transpose()
return torch.from_numpy(UpperCamelCase_ )
def get_encoder_layer_array(UpperCamelCase_ : int , UpperCamelCase_ : str ):
snake_case__ =f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ =tf.train.load_variable(UpperCamelCase_ , UpperCamelCase_ )
if "kernel" in name:
snake_case__ =array.transpose()
return torch.from_numpy(UpperCamelCase_ )
def get_encoder_attention_layer_array(UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : int ):
snake_case__ =f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ =tf.train.load_variable(UpperCamelCase_ , UpperCamelCase_ )
snake_case__ =array.reshape(UpperCamelCase_ )
if "kernel" in name:
snake_case__ =array.transpose()
return torch.from_numpy(UpperCamelCase_ )
print(f"""Loading model based on config from {config_path}...""" )
snake_case__ =BertConfig.from_json_file(UpperCamelCase_ )
snake_case__ =BertForMaskedLM(UpperCamelCase_ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
snake_case__ =model.bert.encoder.layer[layer_index]
# Self-attention
snake_case__ =layer.attention.self
snake_case__ =get_encoder_attention_layer_array(
UpperCamelCase_ , '_query_dense/kernel' , self_attn.query.weight.data.shape )
snake_case__ =get_encoder_attention_layer_array(
UpperCamelCase_ , '_query_dense/bias' , self_attn.query.bias.data.shape )
snake_case__ =get_encoder_attention_layer_array(
UpperCamelCase_ , '_key_dense/kernel' , self_attn.key.weight.data.shape )
snake_case__ =get_encoder_attention_layer_array(
UpperCamelCase_ , '_key_dense/bias' , self_attn.key.bias.data.shape )
snake_case__ =get_encoder_attention_layer_array(
UpperCamelCase_ , '_value_dense/kernel' , self_attn.value.weight.data.shape )
snake_case__ =get_encoder_attention_layer_array(
UpperCamelCase_ , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
snake_case__ =layer.attention.output
snake_case__ =get_encoder_attention_layer_array(
UpperCamelCase_ , '_output_dense/kernel' , self_output.dense.weight.data.shape )
snake_case__ =get_encoder_attention_layer_array(
UpperCamelCase_ , '_output_dense/bias' , self_output.dense.bias.data.shape )
snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_attention_layer_norm/gamma' )
snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_attention_layer_norm/beta' )
# Intermediate
snake_case__ =layer.intermediate
snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_intermediate_dense/kernel' )
snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_intermediate_dense/bias' )
# Output
snake_case__ =layer.output
snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_output_dense/kernel' )
snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_output_dense/bias' )
snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_output_layer_norm/gamma' )
snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_output_layer_norm/beta' )
# Embeddings
snake_case__ =get_encoder_array('_position_embedding_layer/embeddings' )
snake_case__ =get_encoder_array('_type_embedding_layer/embeddings' )
snake_case__ =get_encoder_array('_embedding_norm_layer/gamma' )
snake_case__ =get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
snake_case__ =model.cls.predictions.transform
snake_case__ =get_masked_lm_array('dense/kernel' )
snake_case__ =get_masked_lm_array('dense/bias' )
snake_case__ =get_masked_lm_array('layer_norm/gamma' )
snake_case__ =get_masked_lm_array('layer_norm/beta' )
snake_case__ =get_masked_lm_array('embedding_table' )
# Pooling
snake_case__ =BertPooler(config=UpperCamelCase_ )
snake_case__ =get_encoder_array('_pooler_layer/kernel' )
snake_case__ =get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(UpperCamelCase_ )
# Integration test - should load without any errors ;)
snake_case__ =BertForMaskedLM.from_pretrained(UpperCamelCase_ )
print(new_model.eval() )
print('Model conversion was done sucessfully!' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 581
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ = TypeVar("""T""")
class __UpperCamelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] , _A : list[T] , _A : Callable[[T, T], T] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any | T = None
__SCREAMING_SNAKE_CASE : int = len(_A )
__SCREAMING_SNAKE_CASE : list[T] = [any_type for _ in range(self.N )] + arr
__SCREAMING_SNAKE_CASE : Optional[int] = fnc
self.build()
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
for p in range(self.N - 1 , 0 , -1 ):
__SCREAMING_SNAKE_CASE : List[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCAmelCase__ ( self : str , _A : int , _A : T ):
"""simple docstring"""
p += self.N
__SCREAMING_SNAKE_CASE : Tuple = v
while p > 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = p // 2
__SCREAMING_SNAKE_CASE : Any = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCAmelCase__ ( self : int , _A : int , _A : int ): # noqa: E741
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = l + self.N, r + self.N
__SCREAMING_SNAKE_CASE : T | None = None
while l <= r:
if l % 2 == 1:
__SCREAMING_SNAKE_CASE : Optional[int] = self.st[l] if res is None else self.fn(_A , self.st[l] )
if r % 2 == 0:
__SCREAMING_SNAKE_CASE : int = self.st[r] if res is None else self.fn(_A , self.st[r] )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowercase_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
lowercase_ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
lowercase_ = SegmentTree(test_array, min)
lowercase_ = SegmentTree(test_array, max)
lowercase_ = SegmentTree(test_array, lambda a, b: a + b)
def a__ ( ):
"""simple docstring"""
for i in range(len(snake_case ) ):
for j in range(snake_case , len(snake_case ) ):
__SCREAMING_SNAKE_CASE : Any = reduce(snake_case , test_array[i : j + 1] )
__SCREAMING_SNAKE_CASE : Tuple = reduce(snake_case , test_array[i : j + 1] )
__SCREAMING_SNAKE_CASE : str = reduce(lambda snake_case , snake_case : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(snake_case , snake_case )
assert max_range == max_segment_tree.query(snake_case , snake_case )
assert sum_range == sum_segment_tree.query(snake_case , snake_case )
test_all_segments()
for index, value in test_updates.items():
lowercase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 74
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = '''hf-internal-testing/tiny-random-t5'''
lowerCamelCase__ = AutoTokenizer.from_pretrained(__lowerCAmelCase )
lowerCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase )
lowerCamelCase__ = tokenizer('''This is me''' , return_tensors='''pt''' )
lowerCamelCase__ = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowerCamelCase__ = model.generate(**__lowerCAmelCase )
lowerCamelCase__ = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
lowerCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowerCamelCase__ = model_reloaded.generate(**__lowerCAmelCase )
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = '''hf-internal-testing/tiny-random-t5'''
lowerCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase )
lowerCamelCase__ = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCAmelCase ):
model.save_pretrained(__lowerCAmelCase )
lowerCamelCase__ = model.reverse_bettertransformer()
model.save_pretrained(__lowerCAmelCase )
| 481
| 0
|
import torch
from diffusers import StableDiffusionPipeline
UpperCAmelCase_ = '''path-to-your-trained-model'''
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
UpperCAmelCase_ = '''A photo of sks dog in a bucket'''
UpperCAmelCase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 707
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase_ = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE_ = RobertaTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
_snake_case : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
_snake_case : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop("""type""" ) )
_snake_case : List[str] = add_prefix_space
_snake_case : Union[str, Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = add_prefix_space
_snake_case : Optional[Any] = """post_processor"""
_snake_case : Optional[Any] = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
_snake_case : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_snake_case : Optional[Any] = tuple(state["""sep"""] )
if "cls" in state:
_snake_case : int = tuple(state["""cls"""] )
_snake_case : Tuple = False
if state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
_snake_case : Tuple = add_prefix_space
_snake_case : Tuple = True
if state.get("""trim_offsets""" , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
_snake_case : str = trim_offsets
_snake_case : List[str] = True
if changes_to_apply:
_snake_case : Tuple = getattr(SCREAMING_SNAKE_CASE__ , state.pop("""type""" ) )
_snake_case : Tuple = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
_snake_case : Any = value
def __lowerCamelCase( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : str = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : List[str] = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
"""simple docstring"""
_snake_case : Any = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
"""simple docstring"""
_snake_case : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
"""simple docstring"""
_snake_case : Union[str, Any] = [self.sep_token_id]
_snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 519
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 104
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Optional[int] = "perceiver"
def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1e-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ) -> Optional[int]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A__ = num_latents
A__ = d_latents
A__ = d_model
A__ = num_blocks
A__ = num_self_attends_per_block
A__ = num_self_attention_heads
A__ = num_cross_attention_heads
A__ = qk_channels
A__ = v_channels
A__ = cross_attention_shape_for_attention
A__ = self_attention_widening_factor
A__ = cross_attention_widening_factor
A__ = hidden_act
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = layer_norm_eps
A__ = use_query_residual
# masked language modeling attributes
A__ = vocab_size
A__ = max_position_embeddings
# image classification attributes
A__ = image_size
# flow attributes
A__ = train_size
# multimodal autoencoding attributes
A__ = num_frames
A__ = audio_samples_per_frame
A__ = samples_per_patch
A__ = output_shape
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
@property
def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
A__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def snake_case__ ( self ) -> float:
return 1e-4
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ) -> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
A__ = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
A__ = [" ".join(["a"] ) * seq_length] * batch_size
A__ = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
A__ = inputs.pop("input_ids" )
return inputs
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch )
A__ = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
A__ = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 104
| 1
|
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = ["""input_values""", """attention_mask"""]
def __init__( self , UpperCamelCase = 1 , UpperCamelCase = 1_6000 , UpperCamelCase = 0.0 , UpperCamelCase = False , UpperCamelCase = 80 , UpperCamelCase = 16 , UpperCamelCase = 64 , UpperCamelCase = "hann_window" , UpperCamelCase = 1.0 , UpperCamelCase = 80 , UpperCamelCase = 7600 , UpperCamelCase = 1E-10 , UpperCamelCase = 2 , UpperCamelCase = True , **UpperCamelCase , ) -> Union[str, Any]:
super().__init__(feature_size=UpperCamelCase , sampling_rate=UpperCamelCase , padding_value=UpperCamelCase , **UpperCamelCase )
__lowerCAmelCase = do_normalize
__lowerCAmelCase = return_attention_mask
__lowerCAmelCase = num_mel_bins
__lowerCAmelCase = hop_length
__lowerCAmelCase = win_length
__lowerCAmelCase = win_function
__lowerCAmelCase = frame_signal_scale
__lowerCAmelCase = fmin
__lowerCAmelCase = fmax
__lowerCAmelCase = mel_floor
__lowerCAmelCase = reduction_factor
__lowerCAmelCase = win_length * sampling_rate // 1000
__lowerCAmelCase = hop_length * sampling_rate // 1000
__lowerCAmelCase = optimal_fft_length(self.sample_size )
__lowerCAmelCase = (self.n_fft // 2) + 1
__lowerCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCamelCase )
__lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCamelCase , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCamelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
__lowerCAmelCase = np.array(UpperCamelCase , np.intaa )
__lowerCAmelCase = []
for vector, length in zip(UpperCamelCase , attention_mask.sum(-1 ) ):
__lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
__lowerCAmelCase = padding_value
normed_input_values.append(UpperCamelCase )
else:
__lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase_ ( self , UpperCamelCase , ) -> np.ndarray:
__lowerCAmelCase = spectrogram(
UpperCamelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , **UpperCamelCase , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
__lowerCAmelCase = self._process_audio(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase , )
else:
__lowerCAmelCase = None
if audio_target is not None:
__lowerCAmelCase = self._process_audio(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase , )
if inputs is None:
return inputs_target
else:
__lowerCAmelCase = inputs_target["input_values"]
__lowerCAmelCase = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
__lowerCAmelCase = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , **UpperCamelCase , ) -> BatchFeature:
__lowerCAmelCase = isinstance(UpperCamelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__lowerCAmelCase = is_batched_numpy or (
isinstance(UpperCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowerCAmelCase = [np.asarray(UpperCamelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(UpperCamelCase , np.ndarray ):
__lowerCAmelCase = np.asarray(UpperCamelCase , dtype=np.floataa )
elif isinstance(UpperCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
__lowerCAmelCase = speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowerCAmelCase = [speech]
# needed to make pad() work on spectrogram inputs
__lowerCAmelCase = self.feature_size
# convert into correct format for padding
if is_target:
__lowerCAmelCase = [self._extract_mel_features(UpperCamelCase ) for waveform in speech]
__lowerCAmelCase = BatchFeature({"input_values": features} )
__lowerCAmelCase = self.num_mel_bins
else:
__lowerCAmelCase = BatchFeature({"input_values": speech} )
__lowerCAmelCase = self.pad(
UpperCamelCase , padding=UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , **UpperCamelCase , )
__lowerCAmelCase = feature_size_hack
# convert input values to correct format
__lowerCAmelCase = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
__lowerCAmelCase = [np.asarray(UpperCamelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(UpperCamelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
__lowerCAmelCase = [array.astype(np.floataa ) for array in input_values]
elif isinstance(UpperCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
__lowerCAmelCase = input_values.astype(np.floataa )
# convert attention_mask to correct format
__lowerCAmelCase = padded_inputs.get("attention_mask" )
if attention_mask is not None:
__lowerCAmelCase = [np.asarray(UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__lowerCAmelCase = (
attention_mask
if self._get_padding_strategies(UpperCamelCase , max_length=UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=UpperCamelCase , padding_value=self.padding_value )
if return_tensors is not None:
__lowerCAmelCase = padded_inputs.convert_to_tensors(UpperCamelCase )
return padded_inputs
def UpperCAmelCase_ ( self ) -> Dict[str, Any]:
__lowerCAmelCase = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__lowerCAmelCase = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 39
| 0
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=14 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]=99 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : Optional[int]=5 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Union[str, Any]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Tuple=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : List[str]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = use_mc_token_ids
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = self.vocab_size - 1
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = self.get_config()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase_ ( self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , *_lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = CTRLModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase )
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , *_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , *_lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = CTRLForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase_ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase_ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = CTRLModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def lowerCAmelCase_ ( self : int ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
@slow
def lowerCAmelCase_ ( self : str ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = CTRLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase_ ( self : int ):
pass
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_lowerCAmelCase ) # Legal the president is
SCREAMING_SNAKE_CASE_ = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase )
| 31
|
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = {}
def lowerCAmelCase_ ( self : List[str] ):
print(self.vertex )
for i in self.vertex:
print(_lowerCAmelCase , ' -> ' , ' -> '.join([str(_lowerCAmelCase ) for j in self.vertex[i]] ) )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCAmelCase )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE_ = [to_vertex]
def lowerCAmelCase_ ( self : Optional[Any] ):
# visited array for storing already visited nodes
SCREAMING_SNAKE_CASE_ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : list ):
# mark start vertex as visited
SCREAMING_SNAKE_CASE_ = True
print(_lowerCAmelCase , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 31
| 1
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3_2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[1_0, 2_0, 3_0, 4_0] , _lowerCamelCase=[2, 2, 3, 2] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=1_0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=["stage2", "stage3", "stage4"] , _lowerCamelCase=3 , _lowerCamelCase=None , ):
__UpperCamelCase : str = parent
__UpperCamelCase : Optional[int] = batch_size
__UpperCamelCase : int = image_size
__UpperCamelCase : int = num_channels
__UpperCamelCase : Dict = num_stages
__UpperCamelCase : Union[str, Any] = hidden_sizes
__UpperCamelCase : int = depths
__UpperCamelCase : Union[str, Any] = is_training
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Tuple = intermediate_size
__UpperCamelCase : Optional[int] = hidden_act
__UpperCamelCase : Optional[int] = type_sequence_label_size
__UpperCamelCase : List[Any] = initializer_range
__UpperCamelCase : Union[str, Any] = out_features
__UpperCamelCase : Any = num_labels
__UpperCamelCase : str = scope
__UpperCamelCase : List[str] = num_stages
def lowerCAmelCase ( self ):
__UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase : Optional[Any] = None
if self.use_labels:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowerCAmelCase ( self ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCamelCase , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase : str = UperNetForSemanticSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__UpperCamelCase : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCAmelCase ( self ):
__UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) : Optional[int] = config_and_inputs
__UpperCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def lowerCAmelCase ( self ):
__UpperCamelCase : int = UperNetModelTester(self )
__UpperCamelCase : Dict = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=3_7 )
def lowerCAmelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self ):
return
def lowerCAmelCase ( self ):
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : Any = model_class(_lowerCamelCase )
__UpperCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase : Union[str, Any] = [*signature.parameters.keys()]
__UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowerCAmelCase ( self ):
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def lowerCAmelCase ( self ):
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def lowerCAmelCase ( self ):
pass
@unittest.skip(reason='UperNet does not have a base model' )
def lowerCAmelCase ( self ):
pass
@unittest.skip(reason='UperNet does not have a base model' )
def lowerCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase ( self ):
pass
def lowerCAmelCase ( self ):
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
__UpperCamelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCamelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCamelCase , __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : Tuple = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase : Tuple = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCAmelCase ( self ):
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[Any] = _config_zero_init(_lowerCamelCase )
__UpperCamelCase : List[str] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__UpperCamelCase : Tuple = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def lowerCAmelCase ( self ):
pass
@slow
def lowerCAmelCase ( self ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Tuple = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _UpperCamelCase ( ):
"""simple docstring"""
__UpperCamelCase : Any = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
__UpperCamelCase : Optional[Any] = Image.open(_a ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self ):
__UpperCamelCase : Dict = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
__UpperCamelCase : List[str] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(_lowerCamelCase )
__UpperCamelCase : Optional[Any] = prepare_img()
__UpperCamelCase : Optional[int] = processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
with torch.no_grad():
__UpperCamelCase : Dict = model(**_lowerCamelCase )
__UpperCamelCase : int = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
__UpperCamelCase : List[Any] = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
def lowerCAmelCase ( self ):
__UpperCamelCase : Dict = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
__UpperCamelCase : List[str] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(_lowerCamelCase )
__UpperCamelCase : str = prepare_img()
__UpperCamelCase : List[Any] = processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
with torch.no_grad():
__UpperCamelCase : Dict = model(**_lowerCamelCase )
__UpperCamelCase : Optional[int] = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
__UpperCamelCase : List[Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
| 287
|
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self ):
__UpperCamelCase : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__UpperCamelCase : Dict = Vector()
def lowerCAmelCase ( self ):
__UpperCamelCase : str = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_lowerCamelCase ) , '(0,0,0,0,0,1)' )
def lowerCAmelCase ( self ):
__UpperCamelCase : Dict = Vector([1, 2, 3, 4] )
self.assertEqual(len(_lowerCamelCase ) , 4 )
def lowerCAmelCase ( self ):
__UpperCamelCase : Any = Vector([1, 2] )
__UpperCamelCase : Optional[int] = Vector([1, 2, 3, 4, 5] )
__UpperCamelCase : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__UpperCamelCase : List[str] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def lowerCAmelCase ( self ):
__UpperCamelCase : int = Vector([1, 2, 3] )
__UpperCamelCase : List[str] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCAmelCase ( self ):
__UpperCamelCase : Optional[Any] = Vector([1, 2, 3] )
__UpperCamelCase : Dict = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCAmelCase ( self ):
__UpperCamelCase : Dict = Vector([1, 2, 3] )
__UpperCamelCase : int = Vector([2, -1, 4] ) # for test of dot product
__UpperCamelCase : int = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def lowerCAmelCase ( self ):
self.assertEqual(str(zero_vector(1_0 ) ).count('0' ) , 1_0 )
def lowerCAmelCase ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def lowerCAmelCase ( self ):
__UpperCamelCase : Optional[int] = Vector([1, 2, 3] )
__UpperCamelCase : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _lowerCamelCase , _lowerCamelCase ) ) , '(3,4,7)' )
def lowerCAmelCase ( self ):
__UpperCamelCase : List[str] = Vector([1, 0, 0, 0, 0, 0] )
__UpperCamelCase : Optional[Any] = x.copy()
self.assertEqual(str(_lowerCamelCase ) , str(_lowerCamelCase ) )
def lowerCAmelCase ( self ):
__UpperCamelCase : List[str] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_lowerCamelCase ) , '(0,1,0)' )
def lowerCAmelCase ( self ):
__UpperCamelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_lowerCamelCase ) )
def lowerCAmelCase ( self ):
__UpperCamelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase : List[str] = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_lowerCamelCase , _lowerCamelCase ) )
def lowerCAmelCase ( self ):
__UpperCamelCase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase : List[str] = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_lowerCamelCase , _lowerCamelCase ) )
def lowerCAmelCase ( self ):
__UpperCamelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCAmelCase ( self ):
__UpperCamelCase : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__UpperCamelCase : Union[str, Any] = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def lowerCAmelCase ( self ):
__UpperCamelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_lowerCamelCase ) )
def lowerCAmelCase ( self ):
__UpperCamelCase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def lowerCAmelCase ( self ):
__UpperCamelCase : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def lowerCAmelCase ( self ):
__UpperCamelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def lowerCAmelCase ( self ):
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 287
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class A ( unittest.TestCase ):
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Dict=7 , __magic_name__ : int=3 , __magic_name__ : int=18 , __magic_name__ : Optional[Any]=30 , __magic_name__ : Tuple=400 , __magic_name__ : Dict=True , __magic_name__ : Dict=None , __magic_name__ : Tuple=True , __magic_name__ : int=None , ):
"""simple docstring"""
lowerCAmelCase__ = size if size is not None else {"shortest_edge": 20}
lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = image_size
lowerCAmelCase__ = min_resolution
lowerCAmelCase__ = max_resolution
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :List[str] = MobileNetVaImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = MobileNetVaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , "do_resize" ) )
self.assertTrue(hasattr(__magic_name__ , "size" ) )
self.assertTrue(hasattr(__magic_name__ , "do_center_crop" ) )
self.assertTrue(hasattr(__magic_name__ , "crop_size" ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 48
|
def lowercase ( _a ) -> int:
if not isinstance(_a ,_a ) or number < 0:
raise ValueError("Input must be a non-negative integer" )
UpperCAmelCase_: List[Any] = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 137
| 0
|
from __future__ import annotations
snake_case__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowerCamelCase__ ( a : list[list[int]] , a : list[int] , a : list[int] , a : int , a : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
a__ :Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(a_ ) )
] # the reference grid
a__ :Optional[Any] = 1
a__ :Dict = [
[0 for col in range(len(grid[0] ) )] for row in range(len(a_ ) )
] # the action grid
a__ :Any = init[0]
a__ :Any = init[1]
a__ :List[Any] = 0
a__ :Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
a__ :int = [[f, g, x, y]]
a__ :Optional[Any] = False # flag that is set when search is complete
a__ :List[Any] = False # flag set if we can't find expand
while not found and not resign:
if len(a_ ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
a__ :List[str] = cell.pop()
a__ :Optional[int] = next_cell[2]
a__ :int = next_cell[3]
a__ :Union[str, Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
a__ :Optional[int] = True
else:
for i in range(len(a_ ) ): # to try out different valid actions
a__ :List[Any] = x + DIRECTIONS[i][0]
a__ :str = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(a_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
a__ :List[Any] = g + cost
a__ :Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
a__ :Any = 1
a__ :int = i
a__ :Tuple = []
a__ :int = goal[0]
a__ :Union[str, Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
a__ :str = x - DIRECTIONS[action[x][y]][0]
a__ :str = y - DIRECTIONS[action[x][y]][1]
a__ :List[Any] = xa
a__ :Union[str, Any] = ya
invpath.append([x, y] )
a__ :Optional[Any] = []
for i in range(len(a_ ) ):
path.append(invpath[len(a_ ) - 1 - i] )
return path, action
if __name__ == "__main__":
snake_case__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
snake_case__ = [0, 0]
# all coordinates are given in format [y,x]
snake_case__ = [len(grid) - 1, len(grid[0]) - 1]
snake_case__ = 1
# the cost map which pushes the path closer to the goal
snake_case__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
snake_case__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
snake_case__ = 99
snake_case__ ,snake_case__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 713
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCAmelCase_ ( _a):
lowerCamelCase_ = 'pegasus'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Any , __A : Dict=50265 , __A : List[Any]=1024 , __A : int=12 , __A : Optional[Any]=4096 , __A : Optional[int]=16 , __A : Dict=12 , __A : List[Any]=4096 , __A : List[str]=16 , __A : Optional[int]=0.0 , __A : List[Any]=0.0 , __A : List[str]=True , __A : Optional[int]=True , __A : str="gelu" , __A : Tuple=1024 , __A : Any=0.1 , __A : List[Any]=0.0 , __A : List[str]=0.0 , __A : Tuple=0.02 , __A : Union[str, Any]=0 , __A : Union[str, Any]=False , __A : Optional[Any]=0 , __A : Tuple=1 , __A : str=1 , **__A : Any , ) ->Union[str, Any]:
"""simple docstring"""
a__ :Any = vocab_size
a__ :List[str] = max_position_embeddings
a__ :int = d_model
a__ :Union[str, Any] = encoder_ffn_dim
a__ :List[Any] = encoder_layers
a__ :Union[str, Any] = encoder_attention_heads
a__ :Tuple = decoder_ffn_dim
a__ :List[Any] = decoder_layers
a__ :Tuple = decoder_attention_heads
a__ :Optional[int] = dropout
a__ :str = attention_dropout
a__ :Optional[int] = activation_dropout
a__ :str = activation_function
a__ :Dict = init_std
a__ :Any = encoder_layerdrop
a__ :int = decoder_layerdrop
a__ :Union[str, Any] = use_cache
a__ :List[Any] = encoder_layers
a__ :Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , )
@property
def _snake_case ( self : Optional[Any] ) ->int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _snake_case ( self : Union[str, Any] ) ->int:
"""simple docstring"""
return self.d_model
| 373
| 0
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case = {
"""allenai/led-base-16384""": 16_384,
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[Any] = LEDTokenizer
UpperCamelCase_ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : Any="<s>" , UpperCAmelCase_ : Optional[int]="</s>" , UpperCAmelCase_ : Optional[int]="</s>" , UpperCAmelCase_ : Union[str, Any]="<s>" , UpperCAmelCase_ : Tuple="<unk>" , UpperCAmelCase_ : Tuple="<pad>" , UpperCAmelCase_ : Optional[int]="<mask>" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Dict=True , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase_ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(UpperCAmelCase_ , pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Dict = pre_tok_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : int = "post_processor"
SCREAMING_SNAKE_CASE : List[str] = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state["sep"] )
if "cls" in state:
SCREAMING_SNAKE_CASE : str = tuple(state["cls"] )
SCREAMING_SNAKE_CASE : List[str] = False
if state.get("add_prefix_space" , UpperCAmelCase_ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Any = True
if state.get("trim_offsets" , UpperCAmelCase_ ) != trim_offsets:
SCREAMING_SNAKE_CASE : Any = trim_offsets
SCREAMING_SNAKE_CASE : Optional[int] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(UpperCAmelCase_ , state.pop("type" ) )
SCREAMING_SNAKE_CASE : Any = component_class(**UpperCAmelCase_ )
setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _A ( self : Tuple ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _A ( self : int , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else value
SCREAMING_SNAKE_CASE : Tuple = value
def _A ( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.get("is_split_into_words" , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Tuple = kwargs.get("is_split_into_words" , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
SCREAMING_SNAKE_CASE : str = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def _A ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _A ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self : str , UpperCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE : List[Any] = super()._pad(
encoded_inputs=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding_strategy=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : int = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(UpperCAmelCase_ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : Optional[Any] = len(UpperCAmelCase_ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : Any = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : str = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 62
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __UpperCAmelCase ( a_: Optional[Any] ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set() )
@pytest.fixture
def __UpperCAmelCase ( a_: Tuple ):
class A__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = metric_id
class A__ :
"""simple docstring"""
UpperCamelCase_ : Dict = [MetricMock(UpperCamelCase ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock() )
@pytest.mark.parametrize(
"func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def __UpperCAmelCase ( a_: str, a_: str, a_: Tuple, a_: Any, a_: List[Any] ):
if "tmp_path" in args:
_UpperCAmelCase : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(a_, match="https://huggingface.co/docs/evaluate" ):
func(*a_ )
| 494
| 0
|
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = deque([]) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_UpperCAmelCase)
__A : Optional[int] = self.values[key]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return (
sum(self.charge_factor - len(_UpperCAmelCase) for slot in self.values)
/ self.size_table
* self.charge_factor
)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None):
'''simple docstring'''
if not (
len(self.values[key]) == self.charge_factor and self.values.count(_UpperCAmelCase) == 0
):
return key
return super()._collision_resolution(_UpperCAmelCase , _UpperCAmelCase)
| 338
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ : List[str] = 16
lowercase__ : List[Any] = 32
def _lowerCAmelCase ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]:
__A : List[str] = AutoTokenizer.from_pretrained(__snake_case )
__A : List[str] = load_dataset('glue' , 'mrpc' )
def tokenize_function(__snake_case : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
__A : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__A : Any = datasets.map(
__snake_case , batched=__snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__A : Optional[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__snake_case : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(__snake_case , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__A : Tuple = DataLoader(
tokenized_datasets['train'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
__A : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ) -> List[str]:
model.eval()
__A : int = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__A : Optional[Any] = model(**__snake_case )
__A : Optional[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__A ,__A : Optional[int] = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
__A : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__A : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
__A : Optional[Any] = metric.compute()
return eval_metric["accuracy"]
def _lowerCAmelCase ( __snake_case : Any , __snake_case : Optional[Any] ) -> Optional[int]:
# Initialize accelerator
__A : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A : Dict = config['lr']
__A : str = int(config['num_epochs'] )
__A : Dict = int(config['seed'] )
__A : str = int(config['batch_size'] )
__A : str = args.model_name_or_path
set_seed(__snake_case )
__A ,__A : Tuple = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
__A : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__A : str = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
__A : int = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__A : int = 1
__A : Tuple = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__A : str = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
__A : Optional[Any] = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A ,__A ,__A ,__A ,__A : Optional[int] = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
__A : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
__A : List[str] = 0
__A : str = evaluate.load('glue' , 'mrpc' )
__A : Tuple = num_epochs
if args.partial_train_epoch is not None:
__A : List[str] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__A : Union[str, Any] = args.resume_from_checkpoint.split('epoch_' )[1]
__A : List[str] = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__A : Tuple = int(__snake_case ) + 1
__A : Optional[int] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.print('resumed checkpoint performance:' , __snake_case )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , 'r' ) as f:
__A : Optional[int] = json.load(__snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__A : Any = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
__A : List[str] = model(**__snake_case )
__A : Union[str, Any] = outputs.loss
__A : Any = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__A : List[str] = f'epoch_{epoch}'
__A : Optional[int] = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
__A : Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
__A : List[str] = accuracy
__A : Union[str, Any] = lr_scheduler.get_lr()[0]
__A : List[str] = optimizer.param_groups[0]['lr']
__A : int = epoch
__A : str = overall_step
accelerator.print(f'epoch {epoch}:' , __snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , 'w' ) as f:
json.dump(__snake_case , __snake_case )
def _lowerCAmelCase ( ) -> Dict:
__A : Tuple = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=__snake_case , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=__snake_case , )
parser.add_argument(
'--output_dir' , type=__snake_case , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=__snake_case , default=__snake_case , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=__snake_case , default=__snake_case , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=__snake_case , default=2 , help='Number of train epochs.' , )
__A : int = parser.parse_args()
__A : Optional[Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 338
| 1
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
lowerCAmelCase = f"{src_lang}-{tgt_lang}"
lowerCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase = os.path.join(snake_case__ , '''README.md''' )
print(f"Generating {path}" )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case__ )
# make sure we are under the root of the project
lowercase__ : List[str] = Path(__file__).resolve().parent.parent.parent
lowercase__ : str = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
lowercase__ : Dict = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 312
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowercase__ : int = trt.Logger(trt.Logger.WARNING)
lowercase__ : int = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowercase__ : Optional[int] = logging.getLogger(__name__)
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_8_4,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_2_8,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=2_0,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=3_0,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=4_2, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
lowercase__ : List[str] = parser.parse_args()
if args.tokenizer_name:
lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
lowercase__ : Any = args.per_device_eval_batch_size
lowercase__ : Optional[int] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowercase__ : Optional[Any] = True
lowercase__ : Dict = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
lowercase__ : Any = '''temp_engine/bert-fp16.engine'''
if args.inta:
lowercase__ : str = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
lowercase__ : Tuple = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowercase__ : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
lowercase__ : str = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowercase__ : Union[str, Any] = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowercase__ : List[str] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowercase__ : Dict = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCAmelCase = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
lowerCAmelCase = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
lowerCAmelCase = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , snake_case__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , snake_case__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , snake_case__ )
# start time
lowerCAmelCase = time.time()
# Run inference
context.execute_async(
bindings=[int(snake_case__ ) for d_inp in d_inputs] + [int(snake_case__ ), int(snake_case__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(snake_case__ , snake_case__ , snake_case__ )
cuda.memcpy_dtoh_async(snake_case__ , snake_case__ , snake_case__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase = time.time()
lowerCAmelCase = end_time - start_time
lowerCAmelCase = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowercase__ : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase__ : Optional[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
lowercase__ : str = raw_datasets['''validation'''].column_names
lowercase__ : Optional[int] = '''question''' if '''question''' in column_names else column_names[0]
lowercase__ : Tuple = '''context''' if '''context''' in column_names else column_names[1]
lowercase__ : Optional[Any] = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowercase__ : List[str] = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
lowercase__ : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Any:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lots of space). So we remove that
# left whitespace
lowerCAmelCase = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=snake_case__ , stride=args.doc_stride , return_overflowing_tokens=snake_case__ , return_offsets_mapping=snake_case__ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase = tokenized_examples.sequence_ids(snake_case__ )
lowerCAmelCase = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
lowercase__ : Dict = raw_datasets['''validation''']
# Validation Feature Creation
lowercase__ : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
lowercase__ : int = default_data_collator
lowercase__ : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowercase__ : List[Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="eval" ) -> Dict:
# Post-processing: we match the start logits and end logits to answers in the original context.
lowerCAmelCase = postprocess_qa_predictions(
examples=snake_case__ , features=snake_case__ , predictions=snake_case__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=snake_case__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
lowerCAmelCase = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=snake_case__ , label_ids=snake_case__ )
lowercase__ : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple:
return trt.volume(engine.get_binding_shape(snake_case__ ) ) * engine.get_binding_dtype(snake_case__ ).itemsize
# Allocate device memory for inputs and outputs.
lowercase__ : str = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
lowercase__ : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
lowercase__ : List[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
lowercase__ : Optional[int] = cuda.mem_alloc(h_outputa.nbytes)
lowercase__ : Union[str, Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowercase__ : Tuple = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f' Num examples = {len(eval_dataset)}')
logger.info(f' Batch size = {args.per_device_eval_batch_size}')
lowercase__ : Dict = 0.0
lowercase__ : Any = 0
lowercase__ : str = timeit.default_timer()
lowercase__ : int = None
for step, batch in enumerate(eval_dataloader):
lowercase__ , lowercase__ : Dict = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
lowercase__ , lowercase__ : List[str] = outputs
lowercase__ : List[str] = torch.tensor(start_logits)
lowercase__ : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
lowercase__ : Tuple = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
lowercase__ : Union[str, Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
lowercase__ : Tuple = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowercase__ : Optional[int] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
lowercase__ : Union[str, Any] = nested_truncate(all_preds, len(eval_dataset))
lowercase__ : Tuple = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0))
logger.info('''Total Number of Inference = %d''', niter)
lowercase__ : Optional[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
lowercase__ : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'Evaluation metrics: {eval_metric}')
| 312
| 1
|
"""simple docstring"""
import numpy as np
from PIL import Image
def snake_case ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]:
lowerCamelCase : Optional[int] = np.array(a_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
lowerCamelCase : Dict = 0
lowerCamelCase : int = 0
lowerCamelCase : List[Any] = 0
lowerCamelCase : Dict = 0
# compute the shape of the output matrix
lowerCamelCase : Optional[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCamelCase : Union[str, Any] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCamelCase : Union[str, Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCamelCase : Dict = 0
lowerCamelCase : int = 0
return updated_arr
def snake_case ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Tuple:
lowerCamelCase : Optional[int] = np.array(a_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
lowerCamelCase : Any = 0
lowerCamelCase : Any = 0
lowerCamelCase : Any = 0
lowerCamelCase : List[Any] = 0
# compute the shape of the output matrix
lowerCamelCase : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCamelCase : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCamelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCamelCase : str = 0
lowerCamelCase : List[str] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
__lowerCamelCase :List[Any] = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 704
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 0
|
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( lowerCamelCase__ = "AAPL" ):
"""simple docstring"""
lowercase__ : Optional[Any] = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
lowercase__ : Any = BeautifulSoup(requests.get(lowerCAmelCase__ ).text , "html.parser" )
lowercase__ : str = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 496
|
"""simple docstring"""
from math import pow, sqrt
def UpperCAmelCase__ ( *lowerCAmelCase__ :float ) -> bool:
'''simple docstring'''
lowercase = len(lowerCAmelCase__ ) > 0 and all(value > 0.0 for value in values )
return result
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError("""Input Error: Molar mass values must greater than 0.""" )
)
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
| 359
| 0
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : int = StableUnCLIPPipeline
lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCamelCase : Dict = False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = 3_2
lowerCamelCase_ : int = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase_ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase_ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A , projection_dim=A , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
lowerCamelCase_ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=A , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = StableUnCLIPImageNormalizer(embedding_dim=A )
lowerCamelCase_ : str = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A , layers_per_block=1 , upcast_attention=A , use_linear_projection=A , )
torch.manual_seed(0 )
lowerCamelCase_ : Dict = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=A , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = AutoencoderKL()
lowerCamelCase_ : List[Any] = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCAmelCase__ (self , A , A=0 ):
if str(A ).startswith('''mps''' ):
lowerCamelCase_ : List[str] = torch.manual_seed(A )
else:
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
lowerCamelCase_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=A )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
lowerCamelCase_ : Optional[int] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase_ : Any = pipe('''anime turle''' , generator=A , output_type='''np''' )
lowerCamelCase_ : Any = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(A , A )
def UpperCAmelCase__ (self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ : Optional[int] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
lowerCamelCase_ : Dict = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ : Tuple = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
lowerCamelCase_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 357
|
'''simple docstring'''
def lowercase_ ( _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : List[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ : str = 0
while b > 0:
if b & 1:
lowerCamelCase_ : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 357
| 1
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_UpperCAmelCase : Dict = HUGGINGFACE_HUB_CACHE
_UpperCAmelCase : Tuple = """config.json"""
_UpperCAmelCase : Tuple = """diffusion_pytorch_model.bin"""
_UpperCAmelCase : Union[str, Any] = """diffusion_flax_model.msgpack"""
_UpperCAmelCase : Optional[int] = """model.onnx"""
_UpperCAmelCase : Any = """diffusion_pytorch_model.safetensors"""
_UpperCAmelCase : Any = """weights.pb"""
_UpperCAmelCase : Union[str, Any] = """https://huggingface.co"""
_UpperCAmelCase : Tuple = default_cache_path
_UpperCAmelCase : Dict = """diffusers_modules"""
_UpperCAmelCase : int = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
_UpperCAmelCase : List[str] = ["""fp16""", """non-ema"""]
_UpperCAmelCase : str = """.self_attn"""
| 295
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : Any ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase__ , lowerCamelCase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=UpperCAmelCase , dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=UpperCAmelCase , from_pt=UpperCAmelCase , dtype=jnp.bfloataa )
lowerCamelCase__ : int = controlnet_params
lowerCamelCase__ : Any = 'bird'
lowerCamelCase__ : Dict = jax.device_count()
lowerCamelCase__ : Any = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowerCamelCase__ : Tuple = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[int] = jax.random.split(UpperCAmelCase , jax.device_count() )
lowerCamelCase__ : List[Any] = replicate(UpperCAmelCase )
lowerCamelCase__ : Dict = shard(UpperCAmelCase )
lowerCamelCase__ : Any = shard(UpperCAmelCase )
lowerCamelCase__ : Any = pipe(
prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : List[str] = images[0, 253:256, 253:256, -1]
lowerCamelCase__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : Tuple = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def A_ ( self : List[str] ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=UpperCAmelCase , dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=UpperCAmelCase , from_pt=UpperCAmelCase , dtype=jnp.bfloataa )
lowerCamelCase__ : Optional[int] = controlnet_params
lowerCamelCase__ : Any = 'Chef in the kitchen'
lowerCamelCase__ : str = jax.device_count()
lowerCamelCase__ : Union[str, Any] = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowerCamelCase__ : List[str] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase__ : Tuple = jax.random.PRNGKey(0 )
lowerCamelCase__ : List[str] = jax.random.split(UpperCAmelCase , jax.device_count() )
lowerCamelCase__ : Dict = replicate(UpperCAmelCase )
lowerCamelCase__ : List[str] = shard(UpperCAmelCase )
lowerCamelCase__ : Tuple = shard(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = pipe(
prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : int = images[0, 253:256, 253:256, -1]
lowerCamelCase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : int = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 295
| 1
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
_snake_case : Dict = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowercase ( cls : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : Optional[int] ) -> int:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase_ , repo_id='test-config' , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
__lowerCAmelCase = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase_ , repo_id='valid_org/test-config-org' , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
__lowerCAmelCase = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
def lowercase ( self : Union[str, Any] ) -> Tuple:
CustomConfig.register_for_auto_class()
__lowerCAmelCase = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCAmelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase = c.n_embd + 1 # int
__lowerCAmelCase = c.resid_pdrop + 1.0 # float
__lowerCAmelCase = not c.scale_attn_weights # bool
__lowerCAmelCase = c.summary_type + 'foo' # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(lowerCAmelCase_ , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(lowerCAmelCase_ , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCAmelCase_ , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCAmelCase_ , c.summary_type , 'mismatch for key: summary_type' )
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = PretrainedConfig()
__lowerCAmelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase_ , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase_ , lowerCAmelCase_ )]
if len(lowerCAmelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f""" {", ".join(lowerCAmelCase_ )}.""" )
def lowercase ( self : int ) -> Dict:
with self.assertRaises(lowerCAmelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> int:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase = mock.Mock()
__lowerCAmelCase = 5_0_0
__lowerCAmelCase = {}
__lowerCAmelCase = HTTPError
__lowerCAmelCase = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=lowerCAmelCase_ ) as mock_head:
__lowerCAmelCase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase ( self : Optional[int] ) -> List[str]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def lowercase ( self : Dict ) -> List[Any]:
__lowerCAmelCase = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase_ , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase = ['config.42.0.0.json']
__lowerCAmelCase = 7_6_8
configuration.save_pretrained(lowerCAmelCase_ )
shutil.move(os.path.join(lowerCAmelCase_ , 'config.4.0.0.json' ) , os.path.join(lowerCAmelCase_ , 'config.42.0.0.json' ) )
__lowerCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def lowercase ( self : Union[str, Any] ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCAmelCase = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCAmelCase = 'v4.0.0'
__lowerCAmelCase , __lowerCAmelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase_ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__lowerCAmelCase = 'v3.0.0'
__lowerCAmelCase = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 713
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def a_ ( lowerCAmelCase_ : Features ):
__lowerCAmelCase = np.inf
def set_batch_size(lowerCAmelCase_ : FeatureType ) -> None:
nonlocal batch_size
if isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
__lowerCAmelCase = min(lowerCAmelCase_, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
__lowerCAmelCase = min(lowerCAmelCase_, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(lowerCAmelCase_, lowerCAmelCase_ ) and feature.dtype == "binary":
__lowerCAmelCase = min(lowerCAmelCase_, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(lowerCAmelCase_, lowerCAmelCase_ )
return None if batch_size is np.inf else batch_size
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase_ : NestedDataStructureLike[PathLike] , lowerCAmelCase_ : Optional[NamedSplit] = None , lowerCAmelCase_ : Optional[Features] = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[int] = None , **lowerCAmelCase_ : int , ) -> str:
super().__init__(
lowerCAmelCase_ , split=lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , streaming=lowerCAmelCase_ , num_proc=lowerCAmelCase_ , **lowerCAmelCase_ , )
__lowerCAmelCase = path_or_paths if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else {self.split: path_or_paths}
__lowerCAmelCase = _PACKAGED_DATASETS_MODULES['parquet'][1]
__lowerCAmelCase = Parquet(
cache_dir=lowerCAmelCase_ , data_files=lowerCAmelCase_ , features=lowerCAmelCase_ , hash=lowerCAmelCase_ , **lowerCAmelCase_ , )
def lowercase ( self : Optional[int] ) -> int:
# Build iterable dataset
if self.streaming:
__lowerCAmelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase_ , download_mode=lowerCAmelCase_ , verification_mode=lowerCAmelCase_ , base_path=lowerCAmelCase_ , num_proc=self.num_proc , )
__lowerCAmelCase = self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase_ , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase_ : Dataset , lowerCAmelCase_ : Union[PathLike, BinaryIO] , lowerCAmelCase_ : Optional[int] = None , **lowerCAmelCase_ : int , ) -> Any:
__lowerCAmelCase = dataset
__lowerCAmelCase = path_or_buf
__lowerCAmelCase = batch_size or get_writer_batch_size(dataset.features )
__lowerCAmelCase = parquet_writer_kwargs
def lowercase ( self : Optional[int] ) -> int:
__lowerCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__lowerCAmelCase = self._write(file_obj=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , **self.parquet_writer_kwargs )
else:
__lowerCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=lowerCAmelCase_ , **self.parquet_writer_kwargs )
return written
def lowercase ( self : List[Any] , lowerCAmelCase_ : BinaryIO , lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int] ) -> int:
__lowerCAmelCase = 0
__lowerCAmelCase = parquet_writer_kwargs.pop('path_or_buf' , lowerCAmelCase_ )
__lowerCAmelCase = self.dataset.features.arrow_schema
__lowerCAmelCase = pq.ParquetWriter(lowerCAmelCase_ , schema=lowerCAmelCase_ , **lowerCAmelCase_ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , lowerCAmelCase_ ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__lowerCAmelCase = query_table(
table=self.dataset._data , key=slice(lowerCAmelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(lowerCAmelCase_ )
written += batch.nbytes
writer.close()
return written
| 421
| 0
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 1_00 ) -> int:
'''simple docstring'''
_A = n * (n + 1) * (2 * n + 1) / 6
_A = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 7
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : str = '''speech_to_text'''
UpperCAmelCase : List[Any] = ['''past_key_values''']
UpperCAmelCase : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int , _UpperCAmelCase : Union[str, Any]=10_000 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2_048 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Tuple=2_048 , _UpperCAmelCase : str=4 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Union[str, Any]="relu" , _UpperCAmelCase : List[Any]=256 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[str]=6_000 , _UpperCAmelCase : Optional[Any]=1_024 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Any=(5, 5) , _UpperCAmelCase : int=1_024 , _UpperCAmelCase : str=80 , _UpperCAmelCase : Any=1 , **_UpperCAmelCase : Tuple , ):
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(_UpperCAmelCase )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
| 7
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
| 703
|
"""simple docstring"""
import sys
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Dict = len(_UpperCAmelCase )
A_ : int = [[0 for x in range(_UpperCAmelCase )] for x in range(_UpperCAmelCase )]
A_ : Tuple = [[0 for x in range(_UpperCAmelCase )] for x in range(_UpperCAmelCase )]
for chain_length in range(2 , _UpperCAmelCase ):
for a in range(1 , n - chain_length + 1 ):
A_ : Optional[Any] = a + chain_length - 1
A_ : List[str] = sys.maxsize
for c in range(_UpperCAmelCase , _UpperCAmelCase ):
A_ : Any = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
A_ : Optional[Any] = cost
A_ : Optional[int] = c
return matrix, sol
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if i == j:
print('A' + str(_UpperCAmelCase ) , end=' ' )
else:
print('(' , end=' ' )
print_optiomal_solution(_UpperCAmelCase , _UpperCAmelCase , optimal_solution[i][j] )
print_optiomal_solution(_UpperCAmelCase , optimal_solution[i][j] + 1 , _UpperCAmelCase )
print(')' , end=' ' )
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Optional[Any] = [30, 35, 15, 5, 10, 20, 25]
A_ : Optional[Any] = len(_UpperCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
A_ , A_ : int = matrix_chain_order(_UpperCAmelCase )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
print_optiomal_solution(_UpperCAmelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 302
| 0
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ (lowercase__ ):
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=False , lowercase_=True , lowercase_="None" , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Any:
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_input_mask
a__ =use_token_type_ids
a__ =use_labels
a__ =vocab_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =num_labels
a__ =num_choices
a__ =relative_attention
a__ =position_biased_input
a__ =pos_att_type
a__ =scope
def __UpperCamelCase ( self) -> List[str]:
a__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__ =None
if self.use_input_mask:
a__ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__ =ids_tensor([self.batch_size] , self.num_choices)
a__ =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self) -> Any:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCamelCase ( self , lowercase_) -> Tuple:
self.parent.assertListEqual(list(result.loss.size()) , [])
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
a__ =DebertaVaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)[0]
a__ =model(lowercase_ , token_type_ids=lowercase_)[0]
a__ =model(lowercase_)[0]
self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
a__ =DebertaVaForMaskedLM(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Dict:
a__ =self.num_labels
a__ =DebertaVaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
self.check_loss_output(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> int:
a__ =self.num_labels
a__ =DebertaVaForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
a__ =DebertaVaForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Tuple:
a__ =DebertaVaForMultipleChoice(config=lowercase_)
model.to(lowercase_)
model.eval()
a__ =input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__ =token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__ =input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__ =model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __UpperCamelCase ( self) -> Tuple:
a__ =self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) =config_and_inputs
a__ ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case =(
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case =True
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> List[Any]:
a__ =DebertaVaModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def __UpperCamelCase ( self) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self) -> int:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowercase_)
def __UpperCamelCase ( self) -> Dict:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase_)
def __UpperCamelCase ( self) -> Dict:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowercase_)
def __UpperCamelCase ( self) -> Dict:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowercase_)
@slow
def __UpperCamelCase ( self) -> Union[str, Any]:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ =DebertaVaModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@unittest.skip(reason='Model not available yet')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@slow
def __UpperCamelCase ( self) -> List[str]:
a__ =DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')
a__ =torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
a__ =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
a__ =model(lowercase_ , attention_mask=lowercase_)[0]
# compare the actual values for a slice.
a__ =torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase_ , atol=1e-4) , F"""{output[:, 1:4, 1:4]}""")
| 20
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class UpperCamelCase :
def __init__( self : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : List[Any]=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : str=37 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Dict=0.0_2 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Optional[Any]=None , ) -> Union[str, Any]:
_a : str = parent
_a : List[str] = batch_size
_a : List[str] = seq_length
_a : Any = is_training
_a : Any = use_token_type_ids
_a : Tuple = use_labels
_a : Optional[Any] = vocab_size
_a : Optional[int] = hidden_size
_a : Union[str, Any] = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : List[Any] = intermediate_size
_a : Any = hidden_act
_a : Optional[int] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : int = max_position_embeddings
_a : Tuple = type_vocab_size
_a : str = type_sequence_label_size
_a : Dict = initializer_range
_a : List[Any] = num_labels
_a : Union[str, Any] = num_choices
_a : List[Any] = scope
_a : List[Any] = self.vocab_size - 1
def _lowercase ( self : Any ) -> int:
_a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Optional[Any] = None
if self.use_token_type_ids:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : List[Any] = None
_a : List[Any] = None
_a : Union[str, Any] = None
if self.use_labels:
_a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_a : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_a : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : Tuple ) -> List[str]:
_a : Optional[int] = OpenAIGPTModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Optional[Any] = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
_a : List[str] = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
_a : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , *UpperCAmelCase__ : Optional[int] ) -> List[str]:
_a : str = OpenAIGPTLMHeadModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Optional[int] = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] ) -> Any:
_a : int = OpenAIGPTDoubleHeadsModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Tuple = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Any ) -> List[Any]:
_a : Dict = self.num_labels
_a : str = OpenAIGPTForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Optional[int] = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : Optional[Any] = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) : Optional[int] = config_and_inputs
_a : Any = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : Dict = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCamelCase : Dict = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCamelCase : Optional[Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowercase ( self : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> Optional[int]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _lowercase ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]=False ) -> List[Any]:
_a : Optional[Any] = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_a : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ , )
_a : Dict = inputs_dict["""labels"""]
_a : List[Any] = inputs_dict["""labels"""]
_a : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCAmelCase__ , )
_a : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def _lowercase ( self : Dict ) -> List[Any]:
_a : Dict = OpenAIGPTModelTester(self )
_a : Any = ConfigTester(self , config_class=UpperCAmelCase__ , n_embd=37 )
def _lowercase ( self : Tuple ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ) -> Any:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> int:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase__ )
def _lowercase ( self : Optional[int] ) -> int:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> Any:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : str ) -> List[str]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : str = OpenAIGPTModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
_a : List[str] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(UpperCAmelCase__ )
_a : Any = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCAmelCase__ ) # the president is
_a : List[Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_a : List[str] = model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase__ )
| 389
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''xlm-roberta'''
def __init__( self , lowerCamelCase=3_05_22 , lowerCamelCase=7_68 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=30_72 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=5_12 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
UpperCamelCase : str = vocab_size
UpperCamelCase : int = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : Tuple = intermediate_size
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase : Tuple = max_position_embeddings
UpperCamelCase : Union[str, Any] = type_vocab_size
UpperCamelCase : int = initializer_range
UpperCamelCase : str = layer_norm_eps
UpperCamelCase : Dict = position_embedding_type
UpperCamelCase : Tuple = use_cache
UpperCamelCase : int = classifier_dropout
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 705
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 435
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : List[str] = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "gpt_neo"
_lowerCamelCase = ["past_key_values"]
_lowerCamelCase = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , UpperCamelCase=5_0257 , UpperCamelCase=2048 , UpperCamelCase=2048 , UpperCamelCase=24 , UpperCamelCase=[[["global", "local"], 12]] , UpperCamelCase=16 , UpperCamelCase=None , UpperCamelCase=256 , UpperCamelCase="gelu_new" , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase=1e-5 , UpperCamelCase=0.02 , UpperCamelCase=True , UpperCamelCase=5_0256 , UpperCamelCase=5_0256 , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_layers
lowerCamelCase_ = num_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = window_size
lowerCamelCase_ = activation_function
lowerCamelCase_ = resid_dropout
lowerCamelCase_ = embed_dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = classifier_dropout
lowerCamelCase_ = layer_norm_epsilon
lowerCamelCase_ = initializer_range
lowerCamelCase_ = use_cache
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = attention_types
lowerCamelCase_ = self.expand_attention_types_params(UpperCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
@staticmethod
def snake_case ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ):
import torch
lowerCamelCase_ = input.size()
lowerCamelCase_ = len(UpperCAmelCase_ )
lowerCamelCase_ = shape[dimension]
lowerCamelCase_ = torch.arange(0 , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase_ = torch.div(sizedim - size , UpperCAmelCase_ , rounding_mode="floor" ) + 1
lowerCamelCase_ = torch.arange(UpperCAmelCase_ ) + low_indices[:min_length][:, None]
lowerCamelCase_ = [slice(UpperCAmelCase_ )] * rank
lowerCamelCase_ = indices
lowerCamelCase_ = input[s]
lowerCamelCase_ = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCAmelCase_ )
def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ):
import torch
lowerCamelCase_ = torch.arange(1 , UpperCAmelCase_ )
lowerCamelCase_ = torch.remainder(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase_ = remainders == 0
lowerCamelCase_ = candidates[divisor_indices]
lowerCamelCase_ = torch.max(UpperCAmelCase_ )
return largest_divisor, torch.div(UpperCAmelCase_ , UpperCAmelCase_ , rounding_mode="floor" )
class snake_case ( lowercase ):
"""simple docstring"""
@property
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase , direction="inputs" )
lowerCamelCase_ = {0: "batch", 1: "past_sequence + sequence"}
else:
lowerCamelCase_ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def snake_case ( self ):
"""simple docstring"""
return self._config.num_heads
def snake_case ( self , UpperCamelCase , UpperCamelCase = -1 , UpperCamelCase = -1 , UpperCamelCase = False , UpperCamelCase = None , ):
"""simple docstring"""
lowerCamelCase_ = super(UpperCamelCase , self ).generate_dummy_inputs(
UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase )
# We need to order the input in the way they appears in the forward()
lowerCamelCase_ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase_ ,lowerCamelCase_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCamelCase_ = seqlen + 2
lowerCamelCase_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase_ = [
(torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) for _ in range(self.num_layers )
]
lowerCamelCase_ = common_inputs["attention_mask"]
if self.use_past:
lowerCamelCase_ = ordered_inputs["attention_mask"].dtype
lowerCamelCase_ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(UpperCamelCase , UpperCamelCase , dtype=UpperCamelCase )] , dim=1 )
return ordered_inputs
@property
def snake_case ( self ):
"""simple docstring"""
return 13
| 675
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
a_ : Optional[Any] = logging.get_logger("""transformers.models.encodec""")
a_ : List[str] = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
a_ : Optional[int] = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
a_ : Tuple = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
a_ : Union[str, Any] = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
a_ : Union[str, Any] = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
a_ : Optional[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
a_ : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
a_ : Any = []
a_ : str = []
def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ):
for attribute in key.split("." ):
lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
lowerCamelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
elif weight_type == "running_mean":
lowerCamelCase_ = value
elif weight_type == "running_var":
lowerCamelCase_ = value
elif weight_type == "num_batches_tracked":
lowerCamelCase_ = value
elif weight_type == "weight_ih_l0":
lowerCamelCase_ = value
elif weight_type == "weight_hh_l0":
lowerCamelCase_ = value
elif weight_type == "bias_ih_l0":
lowerCamelCase_ = value
elif weight_type == "bias_hh_l0":
lowerCamelCase_ = value
elif weight_type == "weight_ih_l1":
lowerCamelCase_ = value
elif weight_type == "weight_hh_l1":
lowerCamelCase_ = value
elif weight_type == "bias_ih_l1":
lowerCamelCase_ = value
elif weight_type == "bias_hh_l1":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCamelCase_ ,lowerCamelCase_ = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ):
lowerCamelCase_ = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCamelCase_ = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCamelCase_ = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(UpperCAmelCase_ , UpperCAmelCase_ ):
logger.info(F'''{name} was ignored''' )
continue
lowerCamelCase_ = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCamelCase_ ,lowerCamelCase_ = key.split(".*." )
if prefix in name and suffix in name:
lowerCamelCase_ = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(UpperCAmelCase_ )[0].split("." )[-2]
lowerCamelCase_ = mapped_key.replace("*" , UpperCAmelCase_ )
if "weight_g" in name:
lowerCamelCase_ = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ = "weight_v"
elif "weight_ih_l0" in name:
lowerCamelCase_ = "weight_ih_l0"
elif "weight_hh_l0" in name:
lowerCamelCase_ = "weight_hh_l0"
elif "bias_ih_l0" in name:
lowerCamelCase_ = "bias_ih_l0"
elif "bias_hh_l0" in name:
lowerCamelCase_ = "bias_hh_l0"
elif "weight_ih_l1" in name:
lowerCamelCase_ = "weight_ih_l1"
elif "weight_hh_l1" in name:
lowerCamelCase_ = "weight_hh_l1"
elif "bias_ih_l1" in name:
lowerCamelCase_ = "bias_ih_l1"
elif "bias_hh_l1" in name:
lowerCamelCase_ = "bias_hh_l1"
elif "bias" in name:
lowerCamelCase_ = "bias"
elif "weight" in name:
lowerCamelCase_ = "weight"
elif "running_mean" in name:
lowerCamelCase_ = "running_mean"
elif "running_var" in name:
lowerCamelCase_ = "running_var"
elif "num_batches_tracked" in name:
lowerCamelCase_ = "num_batches_tracked"
else:
lowerCamelCase_ = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , ):
if config_path is not None:
lowerCamelCase_ = EncodecConfig.from_pretrained(UpperCAmelCase_ )
else:
lowerCamelCase_ = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCamelCase_ = [8, 5, 4, 4]
lowerCamelCase_ = [2.2]
lowerCamelCase_ = 64
lowerCamelCase_ = 32000
lowerCamelCase_ = 2048
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
elif model_name == "encodec_48khz":
lowerCamelCase_ = [8, 5, 4, 2]
lowerCamelCase_ = [3.0, 6.0, 12.0, 24.0]
lowerCamelCase_ = 48000
lowerCamelCase_ = 2
lowerCamelCase_ = False
lowerCamelCase_ = "time_group_norm"
lowerCamelCase_ = True
lowerCamelCase_ = 1.0
lowerCamelCase_ = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
lowerCamelCase_ = EncodecModel(UpperCAmelCase_ )
lowerCamelCase_ = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(UpperCAmelCase_ )
lowerCamelCase_ = torch.load(UpperCAmelCase_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCamelCase_ = original_checkpoint["best_state"]
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(UpperCAmelCase_ )
model.push_to_hub(UpperCAmelCase_ )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
a_ : str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 675
| 1
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
UpperCamelCase_ = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
UpperCamelCase_ = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
UpperCamelCase_ = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
UpperCamelCase_ = f'down_blocks.{i}.resnets.{j}.'
UpperCamelCase_ = f'input_blocks.{3*i + j + 1}.0.'
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
UpperCamelCase_ = f'down_blocks.{i}.attentions.{j}.'
UpperCamelCase_ = f'input_blocks.{3*i + j + 1}.1.'
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
UpperCamelCase_ = f'up_blocks.{i}.resnets.{j}.'
UpperCamelCase_ = f'output_blocks.{3*i + j}.0.'
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
UpperCamelCase_ = f'up_blocks.{i}.attentions.{j}.'
UpperCamelCase_ = f'output_blocks.{3*i + j}.1.'
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
UpperCamelCase_ = f'down_blocks.{i}.downsamplers.0.conv.'
UpperCamelCase_ = f'input_blocks.{3*(i+1)}.0.op.'
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
UpperCamelCase_ = f'up_blocks.{i}.upsamplers.0.'
UpperCamelCase_ = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
UpperCamelCase_ = 'mid_block.attentions.0.'
UpperCamelCase_ = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
UpperCamelCase_ = f'mid_block.resnets.{j}.'
UpperCamelCase_ = f'middle_block.{2*j}.'
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> List[str]:
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
__UpperCAmelCase ={k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
__UpperCAmelCase =sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
__UpperCAmelCase =v.replace(snake_case__ , snake_case__ )
__UpperCAmelCase =v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
__UpperCAmelCase =v.replace(snake_case__ , snake_case__ )
__UpperCAmelCase =v
__UpperCAmelCase ={v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
UpperCamelCase_ = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
UpperCamelCase_ = f'encoder.down_blocks.{i}.resnets.{j}.'
UpperCamelCase_ = f'encoder.down.{i}.block.{j}.'
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
UpperCamelCase_ = f'down_blocks.{i}.downsamplers.0.'
UpperCamelCase_ = f'down.{i}.downsample.'
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
UpperCamelCase_ = f'up_blocks.{i}.upsamplers.0.'
UpperCamelCase_ = f'up.{3-i}.upsample.'
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
UpperCamelCase_ = f'decoder.up_blocks.{i}.resnets.{j}.'
UpperCamelCase_ = f'decoder.up.{3-i}.block.{j}.'
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
UpperCamelCase_ = f'mid_block.resnets.{i}.'
UpperCamelCase_ = f'mid.block_{i+1}.'
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
UpperCamelCase_ = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Optional[int]:
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> int:
__UpperCAmelCase ={k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
__UpperCAmelCase =v.replace(snake_case__ , snake_case__ )
__UpperCAmelCase =v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
__UpperCAmelCase =v.replace(snake_case__ , snake_case__ )
__UpperCAmelCase =v
__UpperCAmelCase ={v: vae_state_dict[k] for k, v in mapping.items()}
__UpperCAmelCase =['''q''', '''k''', '''v''', '''proj_out''']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
__UpperCAmelCase =reshape_weight_for_sd(snake_case__ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
UpperCamelCase_ = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
UpperCamelCase_ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
UpperCamelCase_ = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
UpperCamelCase_ = {'q': 0, 'k': 1, 'v': 2}
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> int:
__UpperCAmelCase ={}
__UpperCAmelCase ={}
__UpperCAmelCase ={}
for k, v in text_enc_dict.items():
if (
k.endswith('''.self_attn.q_proj.weight''' )
or k.endswith('''.self_attn.k_proj.weight''' )
or k.endswith('''.self_attn.v_proj.weight''' )
):
__UpperCAmelCase =k[: -len('''.q_proj.weight''' )]
__UpperCAmelCase =k[-len('''q_proj.weight''' )]
if k_pre not in capture_qkv_weight:
__UpperCAmelCase =[None, None, None]
__UpperCAmelCase =v
continue
if (
k.endswith('''.self_attn.q_proj.bias''' )
or k.endswith('''.self_attn.k_proj.bias''' )
or k.endswith('''.self_attn.v_proj.bias''' )
):
__UpperCAmelCase =k[: -len('''.q_proj.bias''' )]
__UpperCAmelCase =k[-len('''q_proj.bias''' )]
if k_pre not in capture_qkv_bias:
__UpperCAmelCase =[None, None, None]
__UpperCAmelCase =v
continue
__UpperCAmelCase =textenc_pattern.sub(lambda snake_case__ : protected[re.escape(m.group(0 ) )] , snake_case__ )
__UpperCAmelCase =v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
__UpperCAmelCase =textenc_pattern.sub(lambda snake_case__ : protected[re.escape(m.group(0 ) )] , snake_case__ )
__UpperCAmelCase =torch.cat(snake_case__ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
__UpperCAmelCase =textenc_pattern.sub(lambda snake_case__ : protected[re.escape(m.group(0 ) )] , snake_case__ )
__UpperCAmelCase =torch.cat(snake_case__ )
return new_state_dict
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Optional[Any]:
return text_enc_dict
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
UpperCamelCase_ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCamelCase_ = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
UpperCamelCase_ = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
UpperCamelCase_ = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
UpperCamelCase_ = load_file(unet_path, device='cpu')
else:
UpperCamelCase_ = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
UpperCamelCase_ = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
UpperCamelCase_ = load_file(vae_path, device='cpu')
else:
UpperCamelCase_ = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
UpperCamelCase_ = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
UpperCamelCase_ = load_file(text_enc_path, device='cpu')
else:
UpperCamelCase_ = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
UpperCamelCase_ = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
UpperCamelCase_ = convert_unet_state_dict(unet_state_dict)
UpperCamelCase_ = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCamelCase_ = convert_vae_state_dict(vae_state_dict)
UpperCamelCase_ = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
UpperCamelCase_ = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCamelCase_ = {'transformer.' + k: v for k, v in text_enc_dict.items()}
UpperCamelCase_ = convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCamelCase_ = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
UpperCamelCase_ = convert_text_enc_state_dict(text_enc_dict)
UpperCamelCase_ = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCamelCase_ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCamelCase_ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCamelCase_ = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 142
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase ):
a_ : Dict = '''focalnet'''
def __init__(self , UpperCAmelCase=2_2_4 , UpperCAmelCase=4 , UpperCAmelCase=3 , UpperCAmelCase=9_6 , UpperCAmelCase=False , UpperCAmelCase=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , UpperCAmelCase=[2, 2, 6, 2] , UpperCAmelCase=[2, 2, 2, 2] , UpperCAmelCase=[3, 3, 3, 3] , UpperCAmelCase="gelu" , UpperCAmelCase=4.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=False , UpperCAmelCase=1e-4 , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=3_2 , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase)
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =use_conv_embed
__UpperCAmelCase =hidden_sizes
__UpperCAmelCase =depths
__UpperCAmelCase =focal_levels
__UpperCAmelCase =focal_windows
__UpperCAmelCase =hidden_act
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =use_layerscale
__UpperCAmelCase =layerscale_value
__UpperCAmelCase =use_post_layernorm
__UpperCAmelCase =use_post_layernorm_in_modulation
__UpperCAmelCase =normalize_modulator
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =encoder_stride
__UpperCAmelCase =['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths) + 1)]
__UpperCAmelCase , __UpperCAmelCase =get_aligned_output_features_output_indices(
out_features=UpperCAmelCase , out_indices=UpperCAmelCase , stage_names=self.stage_names)
| 142
| 1
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def UpperCamelCase ( _A : Optional[int] , _A : Any )-> Tuple:
"""simple docstring"""
A__ = checkpoint
A__ = {}
A__ = vae_state_dict['encoder.conv_in.weight']
A__ = vae_state_dict['encoder.conv_in.bias']
A__ = vae_state_dict['encoder.conv_out.weight']
A__ = vae_state_dict['encoder.conv_out.bias']
A__ = vae_state_dict['encoder.norm_out.weight']
A__ = vae_state_dict['encoder.norm_out.bias']
A__ = vae_state_dict['decoder.conv_in.weight']
A__ = vae_state_dict['decoder.conv_in.bias']
A__ = vae_state_dict['decoder.conv_out.weight']
A__ = vae_state_dict['decoder.conv_out.bias']
A__ = vae_state_dict['decoder.norm_out.weight']
A__ = vae_state_dict['decoder.norm_out.bias']
A__ = vae_state_dict['quant_conv.weight']
A__ = vae_state_dict['quant_conv.bias']
A__ = vae_state_dict['post_quant_conv.weight']
A__ = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
A__ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
A__ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(UpperCAmelCase__ )
}
# Retrieves the keys for the decoder up blocks only
A__ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
A__ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(UpperCAmelCase__ )
}
for i in range(UpperCAmelCase__ ):
A__ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
A__ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
A__ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
A__ = renew_vae_resnet_paths(UpperCAmelCase__ )
A__ = {'old': f"""down.{i}.block""", 'new': f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
A__ = [key for key in vae_state_dict if 'encoder.mid.block' in key]
A__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A__ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
A__ = renew_vae_resnet_paths(UpperCAmelCase__ )
A__ = {'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
A__ = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
A__ = renew_vae_attention_paths(UpperCAmelCase__ )
A__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
conv_attn_to_linear(UpperCAmelCase__ )
for i in range(UpperCAmelCase__ ):
A__ = num_up_blocks - 1 - i
A__ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
A__ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
A__ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
A__ = renew_vae_resnet_paths(UpperCAmelCase__ )
A__ = {'old': f"""up.{block_id}.block""", 'new': f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
A__ = [key for key in vae_state_dict if 'decoder.mid.block' in key]
A__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A__ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
A__ = renew_vae_resnet_paths(UpperCAmelCase__ )
A__ = {'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
A__ = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
A__ = renew_vae_attention_paths(UpperCAmelCase__ )
A__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
conv_attn_to_linear(UpperCAmelCase__ )
return new_checkpoint
def UpperCamelCase ( _A : str , _A : str , )-> Tuple:
"""simple docstring"""
A__ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
A__ = io.BytesIO(r.content )
A__ = OmegaConf.load(UpperCAmelCase__ )
A__ = 512
A__ = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
A__ = {}
with safe_open(UpperCAmelCase__ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
A__ = f.get_tensor(UpperCAmelCase__ )
else:
A__ = torch.load(UpperCAmelCase__ , map_location=UpperCAmelCase__ )['state_dict']
# Convert the VAE model.
A__ = create_vae_diffusers_config(UpperCAmelCase__ , image_size=UpperCAmelCase__ )
A__ = custom_convert_ldm_vae_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = AutoencoderKL(**UpperCAmelCase__ )
vae.load_state_dict(UpperCAmelCase__ )
vae.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
UpperCAmelCase_ : Tuple = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 491
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger()
@dataclass
class _SCREAMING_SNAKE_CASE:
A_ : nn.Module
A_ : List[nn.Module] = field(default_factory=_SCREAMING_SNAKE_CASE )
A_ : list = field(default_factory=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tensor , UpperCamelCase_ : Tensor ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Optional[Any] = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase_ , nn.Convad ) or isinstance(UpperCamelCase_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCamelCase_ )
def __call__( self : Tuple , UpperCamelCase_ : Tensor ) -> int:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase_ )
[x.remove() for x in self.handles]
return self
@property
def __lowerCamelCase ( self : Tuple ) -> str:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCamelCase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _SCREAMING_SNAKE_CASE:
A_ : nn.Module
A_ : nn.Module
A_ : int = 0
A_ : List = field(default_factory=_SCREAMING_SNAKE_CASE )
A_ : List = field(default_factory=_SCREAMING_SNAKE_CASE )
def __call__( self : List[Any] , UpperCamelCase_ : Tensor ) -> int:
SCREAMING_SNAKE_CASE__ :List[str] = Tracker(self.dest )(UpperCamelCase_ ).parametrized
SCREAMING_SNAKE_CASE__ :Any = Tracker(self.src )(UpperCamelCase_ ).parametrized
SCREAMING_SNAKE_CASE__ :Dict = list(filter(lambda UpperCamelCase_ : type(UpperCamelCase_ ) not in self.src_skip , UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ :str = list(filter(lambda UpperCamelCase_ : type(UpperCamelCase_ ) not in self.dest_skip , UpperCamelCase_ ) )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise Exception(
f'''Numbers of operations are different. Source module has {len(UpperCamelCase_ )} operations while'''
f''' destination module has {len(UpperCamelCase_ )}.''' )
for dest_m, src_m in zip(UpperCamelCase_ , UpperCamelCase_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : ResNetConfig , UpperCAmelCase__ : Path , UpperCAmelCase__ : bool = True ) -> Union[str, Any]:
'''simple docstring'''
print(F'''Converting {name}...''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :str = timm.create_model(UpperCAmelCase__ , pretrained=UpperCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE__ :List[str] = ResNetForImageClassification(UpperCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE__ :Dict = ModuleTransfer(src=UpperCAmelCase__ , dest=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[Any] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(UpperCAmelCase__ )
assert torch.allclose(from_model(UpperCAmelCase__ ) , our_model(UpperCAmelCase__ ).logits ), "The model logits don't match the original one."
SCREAMING_SNAKE_CASE__ :List[str] = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(UpperCAmelCase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCAmelCase__ , )
# we can use the convnext one
SCREAMING_SNAKE_CASE__ :int = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase__ , )
print(F'''Pushed {checkpoint_name}''' )
def lowerCamelCase ( UpperCAmelCase__ : Path , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = True ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ :Any = 1_0_0_0
SCREAMING_SNAKE_CASE__ :List[Any] = (1, num_labels)
SCREAMING_SNAKE_CASE__ :List[str] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ :Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE__ :Tuple = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ :int = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :str = idalabel
SCREAMING_SNAKE_CASE__ :Any = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :Optional[Any] = partial(UpperCAmelCase__ , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[str] = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(UpperCAmelCase__ , names_to_config[model_name] , UpperCAmelCase__ , UpperCAmelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 209
| 0
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowercase ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Dict = pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
snake_case : List[str] = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
snake_case : List[str] = PipelineDataFormat.from_str(
format=lowercase ,output_path=args.output ,input_path=args.input ,column=args.column if args.column else nlp.default_input_names ,overwrite=args.overwrite ,)
return RunCommand(lowercase ,lowercase )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A ) -> int:
snake_case : List[str] = nlp
snake_case : Union[str, Any] = reader
@staticmethod
def UpperCAmelCase ( A ) -> Union[str, Any]:
snake_case : str = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=A , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=A , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=A , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=A , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=A , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=A , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=A , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=A , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : int = self._nlp, []
for entry in self._reader:
snake_case : Union[str, Any] = nlp(**A ) if self._reader.is_multi_columns else nlp(A )
if isinstance(A , A ):
outputs.append(A )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
snake_case : List[str] = self._reader.save_binary(A )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(A )
| 701
|
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ShapEImgaImgPipeline
UpperCamelCase_ = ['''image''']
UpperCamelCase_ = ['''image''']
UpperCamelCase_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase_ = False
@property
def A__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return 32
@property
def A__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return 32
@property
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self : Any ) -> List[str]:
'''simple docstring'''
return 8
@property
def A__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase : List[Any] =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase : Optional[int] =CLIPVisionModel(UpperCAmelCase )
return model
@property
def A__ ( self : Dict ) -> int:
'''simple docstring'''
lowercase : Dict =CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCAmelCase , do_normalize=UpperCAmelCase , do_resize=UpperCAmelCase , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
@property
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Union[str, Any] ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase : Any =PriorTransformer(**UpperCAmelCase )
return model
@property
def A__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase : List[str] ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase : Optional[int] =ShapERenderer(**UpperCAmelCase )
return model
def A__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.dummy_prior
lowercase : List[str] =self.dummy_image_encoder
lowercase : str =self.dummy_image_processor
lowercase : Dict =self.dummy_renderer
lowercase : Optional[Any] =HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCAmelCase , clip_sample=UpperCAmelCase , clip_sample_range=1.0 , )
lowercase : Any ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=0 ) -> List[str]:
'''simple docstring'''
lowercase : List[str] =floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if str(UpperCAmelCase ).startswith('''mps''' ):
lowercase : str =torch.manual_seed(UpperCAmelCase )
else:
lowercase : Tuple =torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase : List[str] ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def A__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase : List[str] ='''cpu'''
lowercase : Optional[int] =self.get_dummy_components()
lowercase : List[Any] =self.pipeline_class(**UpperCAmelCase )
lowercase : Tuple =pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : Tuple =pipe(**self.get_dummy_inputs(UpperCAmelCase ) )
lowercase : Tuple =output.images[0]
lowercase : int =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase : Tuple =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : Tuple =torch_device == '''cpu'''
lowercase : List[str] =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCAmelCase , relax_max_difference=UpperCAmelCase , )
def A__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =self.get_dummy_components()
lowercase : Optional[int] =self.pipeline_class(**UpperCAmelCase )
lowercase : Optional[Any] =pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : Optional[Any] =1
lowercase : Optional[int] =2
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase : Any =batch_size * [inputs[key]]
lowercase : List[Any] =pipe(**UpperCAmelCase , num_images_per_prompt=UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase : List[Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase : int =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase : str =pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : Tuple =torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase : Optional[Any] =pipe(
UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
| 94
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''lxmert'''
_UpperCAmelCase = {}
def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=9500 , snake_case=1600 , snake_case=400 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=9 , snake_case=5 , snake_case=5 , snake_case=2048 , snake_case=4 , snake_case=6.67 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> Optional[int]:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = num_qa_labels
_UpperCAmelCase = num_object_labels
_UpperCAmelCase = num_attr_labels
_UpperCAmelCase = l_layers
_UpperCAmelCase = x_layers
_UpperCAmelCase = r_layers
_UpperCAmelCase = visual_feat_dim
_UpperCAmelCase = visual_pos_dim
_UpperCAmelCase = visual_loss_normalizer
_UpperCAmelCase = task_matched
_UpperCAmelCase = task_mask_lm
_UpperCAmelCase = task_obj_predict
_UpperCAmelCase = task_qa
_UpperCAmelCase = visual_obj_loss
_UpperCAmelCase = visual_attr_loss
_UpperCAmelCase = visual_feat_loss
_UpperCAmelCase = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**snake_case )
| 573
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =["image_processor", "tokenizer"]
UpperCamelCase ="BridgeTowerImageProcessor"
UpperCamelCase =("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ) -> str:
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = True , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding:
__lowercase : int = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
# add pixel_values + pixel_mask
__lowercase : Union[str, Any] = self.image_processor(
UpperCamelCase_ , return_tensors=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , **UpperCamelCase_ )
encoding.update(UpperCamelCase_ )
return encoding
def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]:
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Union[str, Any] = self.tokenizer.model_input_names
__lowercase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 523
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __UpperCAmelCase ( __UpperCamelCase ):
return (data["data"], data["target"])
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : str = XGBClassifier()
classifier.fit(__UpperCamelCase , __UpperCamelCase )
return classifier
def __UpperCAmelCase ( ):
__lowercase : int = load_iris()
__lowercase ,__lowercase : List[Any] = data_handling(__UpperCamelCase )
__lowercase ,__lowercase ,__lowercase ,__lowercase : Any = train_test_split(
__UpperCamelCase , __UpperCamelCase , test_size=0.25 )
__lowercase : Tuple = iris['''target_names''']
# Create an XGBoost Classifier from the training data
__lowercase : Optional[int] = xgboost(__UpperCamelCase , __UpperCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , display_labels=__UpperCamelCase , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 523
| 1
|
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def _snake_case ( A , A , A = 1 , A = 1 , A = 1.0E4 , A = False , A = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
lowerCAmelCase__ = float(embedding_dim // 2 )
lowerCAmelCase__ = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowerCAmelCase__ = min_timescale * jnp.exp(jnp.arange(A , dtype=jnp.floataa ) * -log_timescale_increment )
lowerCAmelCase__ = jnp.expand_dims(A , 1 ) * jnp.expand_dims(A , 0 )
# scale embeddings
lowerCAmelCase__ = scale * emb
if flip_sin_to_cos:
lowerCAmelCase__ = jnp.concatenate([jnp.cos(A ), jnp.sin(A )] , axis=1 )
else:
lowerCAmelCase__ = jnp.concatenate([jnp.sin(A ), jnp.cos(A )] , axis=1 )
lowerCAmelCase__ = jnp.reshape(A , [jnp.shape(A )[0], embedding_dim] )
return signal
class a__ ( nn.Module ):
'''simple docstring'''
lowercase__ : int = 3_2
lowercase__ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(lowerCamelCase_ )
lowerCAmelCase__ = nn.silu(lowerCamelCase_ )
lowerCAmelCase__ = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(lowerCamelCase_ )
return temb
class a__ ( nn.Module ):
'''simple docstring'''
lowercase__ : int = 3_2
lowercase__ : bool = False
lowercase__ : float = 1
@nn.compact
def __call__( self , lowerCamelCase_ ) -> Optional[Any]:
return get_sinusoidal_embeddings(
lowerCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 90
|
import doctest
from collections import deque
import numpy as np
class SCREAMING_SNAKE_CASE__ :
def __init__( self):
lowercase__ : Union[str, Any] = [2, 1, 2, -1]
lowercase__ : Optional[int] = [1, 2, 3, 4]
def snake_case_ ( self):
lowercase__ : Optional[int] = len(self.first_signal)
lowercase__ : List[str] = len(self.second_signal)
lowercase__ : Dict = max(a , a)
# create a zero matrix of max_length x max_length
lowercase__ : List[str] = [[0] * max_length for i in range(a)]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(a):
lowercase__ : Tuple = deque(self.second_signal)
rotated_signal.rotate(a)
for j, item in enumerate(a):
matrix[i][j] += item
# multiply the matrix with the first signal
lowercase__ : Union[str, Any] = np.matmul(np.transpose(a) , np.transpose(self.first_signal))
# rounding-off to two decimal places
return [round(a , 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 164
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_A = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCamelCase ( _A ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_A )
def __UpperCamelCase ( _A ):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase_ = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_A , id=_A )
| 325
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = '''▁'''
_A = {'''vocab_file''': '''spiece.model'''}
_A = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
_A = {
'''google/pegasus-xsum''': 512,
}
_A = logging.get_logger(__name__)
class A ( __UpperCAmelCase ):
__snake_case = VOCAB_FILES_NAMES
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = ['input_ids', 'attention_mask']
def __init__( self, UpperCamelCase__, UpperCamelCase__="<pad>", UpperCamelCase__="</s>", UpperCamelCase__="<unk>", UpperCamelCase__="<mask_2>", UpperCamelCase__="<mask_1>", UpperCamelCase__=None, UpperCamelCase__=103, UpperCamelCase__ = None, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase__, UpperCamelCase__ ):
raise TypeError(
f"additional_special_tokens should be of type {type(UpperCamelCase__ )}, but is"
f" {type(UpperCamelCase__ )}" )
lowerCAmelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(UpperCamelCase__ ), self.offset - 1 )
]
if len(set(UpperCamelCase__ ) ) != len(UpperCamelCase__ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
lowerCAmelCase_ = additional_special_tokens_extended
else:
lowerCAmelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset )]
lowerCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__, unk_token=UpperCamelCase__, mask_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token_sent=UpperCamelCase__, offset=UpperCamelCase__, additional_special_tokens=UpperCamelCase__, sp_model_kwargs=self.sp_model_kwargs, **UpperCamelCase__, )
lowerCAmelCase_ = mask_token_sent
lowerCAmelCase_ = vocab_file
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# add special tokens to encoder dict
lowerCAmelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
lowerCAmelCase_ = {v: k for k, v in self.encoder.items()}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.offset
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase_ = self.__dict__.copy()
lowerCAmelCase_ = None
return state
def __setstate__( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase__, out_type=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCAmelCase_ = self.sp_model.piece_to_id(UpperCamelCase__ )
return sp_id + self.offset
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCAmelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = []
lowerCAmelCase_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
lowerCAmelCase_ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=False ):
"""simple docstring"""
return 1
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase_ = os.path.join(
UpperCamelCase__, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__, '''wb''' ) as fi:
lowerCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 325
| 1
|
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase = 4_00_00_00 ) -> int:
lowerCamelCase_ = [0, 1]
lowerCamelCase_ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase_ = 0
for j in range(len(__UpperCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42
|
def __magic_name__ ( SCREAMING_SNAKE_CASE = 50 ) -> int:
_lowercase : Optional[int] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'swin2sr'
UpperCAmelCase__ : Optional[int] = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=64 , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Dict=3 , UpperCamelCase_: Any=1_80 , UpperCamelCase_: Optional[Any]=[6, 6, 6, 6, 6, 6] , UpperCamelCase_: Union[str, Any]=[6, 6, 6, 6, 6, 6] , UpperCamelCase_: Tuple=8 , UpperCamelCase_: int=2.0 , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=0.0 , UpperCamelCase_: Any=0.0 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Optional[Any]="gelu" , UpperCamelCase_: List[Any]=False , UpperCamelCase_: int=0.02 , UpperCamelCase_: Union[str, Any]=1E-5 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: List[Any]=1.0 , UpperCamelCase_: Any="1conv" , UpperCamelCase_: Dict="pixelshuffle" , **UpperCamelCase_: Dict , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = upscale
__lowerCamelCase = img_range
__lowerCamelCase = resi_connection
__lowerCamelCase = upsampler
| 80
|
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80
| 1
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_snake_case : str = logging.getLogger(__name__)
def _A ( __snake_case :Optional[Any] , __snake_case :int ) -> Dict:
"""simple docstring"""
if os.path.exists(UpperCamelCase__ ):
if os.path.exists(os.path.join(UpperCamelCase__ , "config.json" ) ) and os.path.isfile(
os.path.join(UpperCamelCase__ , "config.json" ) ):
os.remove(os.path.join(UpperCamelCase__ , "config.json" ) )
if os.path.exists(os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) ):
os.remove(os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) )
else:
os.makedirs(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
def _A ( __snake_case :Union[str, Any] , __snake_case :Any=False ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 2
if unlogit:
__SCREAMING_SNAKE_CASE = torch.pow(UpperCamelCase__ , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = p * torch.log(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = 0
return -plogp.sum(dim=-1 )
def _A ( __snake_case :Optional[int] ) -> Any:
"""simple docstring"""
logger.info("lv, h >\t" + "\t".join(f'''{x + 1}''' for x in range(len(UpperCamelCase__ ) ) ) )
for row in range(len(UpperCamelCase__ ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def _A ( __snake_case :List[Any] , __snake_case :Union[str, Any] , __snake_case :List[Any] , __snake_case :int=True , __snake_case :Any=True , __snake_case :List[Any]=None , __snake_case :Dict=False ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model.config.num_hidden_layers, model.config.num_attention_heads
__SCREAMING_SNAKE_CASE = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
__SCREAMING_SNAKE_CASE = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
if head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCamelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = 0.0
for step, inputs in enumerate(tqdm(UpperCamelCase__ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
__SCREAMING_SNAKE_CASE = tuple(t.to(args.device ) for t in inputs )
(__SCREAMING_SNAKE_CASE ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ , labels=UpperCamelCase__ , head_mask=UpperCamelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__SCREAMING_SNAKE_CASE = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = entropy(attn.detach() , UpperCamelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = torch.pow(torch.pow(UpperCamelCase__ , UpperCamelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__SCREAMING_SNAKE_CASE = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(UpperCamelCase__ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(UpperCamelCase__ )
logger.info("Head ranked by importance scores" )
__SCREAMING_SNAKE_CASE = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__SCREAMING_SNAKE_CASE = torch.arange(
head_importance.numel() , device=args.device )
__SCREAMING_SNAKE_CASE = head_ranks.view_as(UpperCamelCase__ )
print_ad_tensor(UpperCamelCase__ )
return attn_entropy, head_importance, total_loss
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , UpperCamelCase__ , original_score * args.masking_threshold )
__SCREAMING_SNAKE_CASE = torch.ones_like(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__SCREAMING_SNAKE_CASE = original_score
while current_score >= original_score * args.masking_threshold:
__SCREAMING_SNAKE_CASE = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__SCREAMING_SNAKE_CASE = float("Inf" )
__SCREAMING_SNAKE_CASE = head_importance.view(-1 ).sort()[1]
if len(UpperCamelCase__ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
__SCREAMING_SNAKE_CASE = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
__SCREAMING_SNAKE_CASE = new_head_mask.view(-1 )
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = new_head_mask.view_as(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = new_head_mask.clone().detach()
print_ad_tensor(UpperCamelCase__ )
# Compute metric and head importance again
__SCREAMING_SNAKE_CASE = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , head_mask=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , UpperCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(UpperCamelCase__ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def _A ( __snake_case :Tuple , __snake_case :Optional[int] , __snake_case :Optional[Any] , __snake_case :Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = datetime.now()
__SCREAMING_SNAKE_CASE = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = 1 / loss
__SCREAMING_SNAKE_CASE = datetime.now() - before_time
__SCREAMING_SNAKE_CASE = sum(p.numel() for p in model.parameters() )
__SCREAMING_SNAKE_CASE = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = [
v,
]
assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = sum(p.numel() for p in model.parameters() )
__SCREAMING_SNAKE_CASE = datetime.now()
__SCREAMING_SNAKE_CASE = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ , actually_pruned=UpperCamelCase__ , )
__SCREAMING_SNAKE_CASE = 1 / loss
__SCREAMING_SNAKE_CASE = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , UpperCamelCase__ , UpperCamelCase__ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , UpperCamelCase__ , UpperCamelCase__ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(UpperCamelCase__ , args.output_dir )
def _A ( ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=UpperCamelCase__ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=UpperCamelCase__ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=UpperCamelCase__ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=UpperCamelCase__ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=UpperCamelCase__ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=UpperCamelCase__ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=UpperCamelCase__ , help="Batch size." )
parser.add_argument("--seed" , type=UpperCamelCase__ , default=42 )
parser.add_argument("--local_rank" , type=UpperCamelCase__ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." )
__SCREAMING_SNAKE_CASE = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__SCREAMING_SNAKE_CASE = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
__SCREAMING_SNAKE_CASE = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__SCREAMING_SNAKE_CASE = torch.device("cuda" , args.local_rank )
__SCREAMING_SNAKE_CASE = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__SCREAMING_SNAKE_CASE = nn.parallel.DistributedDataParallel(
UpperCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase__ )
elif args.n_gpu > 1:
__SCREAMING_SNAKE_CASE = nn.DataParallel(UpperCamelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
torch.save(UpperCamelCase__ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , UpperCamelCase__ )
# Prepare dataset
__SCREAMING_SNAKE_CASE = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__SCREAMING_SNAKE_CASE = (torch.from_numpy(UpperCamelCase__ ),)
__SCREAMING_SNAKE_CASE = TensorDataset(*UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = RandomSampler(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__SCREAMING_SNAKE_CASE = mask_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
prune_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 693
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def snake_case ( UpperCamelCase__ : Any ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase : Any = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase : List[Any] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase : str = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase : Tuple = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase : Optional[Any] = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase : List[str] = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase : List[Any] = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase : int = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase : Dict = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase : List[Any] = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase : int = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase : Any = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase : Dict = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase : str = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase : List[Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowerCamelCase : str = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowerCamelCase : Tuple = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowerCamelCase : int = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase : str = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase : List[Any] = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase : Dict = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase : Optional[Any] = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ) -> Any:
for key in orig_state_dict.copy().keys():
lowerCamelCase : List[Any] = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : Tuple = key.split(""".""" )
lowerCamelCase , lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
lowerCamelCase : Optional[Any] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase : Dict = val[:dim, :]
lowerCamelCase : Any = val[dim : dim * 2, :]
lowerCamelCase : int = val[-dim:, :]
else:
lowerCamelCase : Tuple = val[:dim]
lowerCamelCase : Tuple = val[dim : dim * 2]
lowerCamelCase : Union[str, Any] = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : Union[str, Any] = int(key_split[3] )
lowerCamelCase : int = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase : List[Any] = val[:dim, :]
lowerCamelCase : List[Any] = val[
dim : dim * 2, :
]
lowerCamelCase : List[str] = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[:dim]
lowerCamelCase : str = val[dim : dim * 2]
lowerCamelCase : Optional[Any] = val[-dim:]
else:
lowerCamelCase : int = rename_key(UpperCamelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase : str = val.squeeze_()
else:
lowerCamelCase : Any = val
return orig_state_dict
def snake_case ( ) -> List[Any]:
lowerCamelCase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : List[str] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase__ : List[str]=False ) -> Optional[int]:
lowerCamelCase : Optional[int] = GroupViTConfig()
lowerCamelCase : Tuple = GroupViTModel(UpperCamelCase__ ).eval()
lowerCamelCase : List[str] = torch.load(UpperCamelCase__ , map_location="""cpu""" )["""model"""]
lowerCamelCase : Any = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase__ ) == 0)
# verify result
lowerCamelCase : Optional[int] = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase : int = prepare_img()
lowerCamelCase : Optional[Any] = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase : Dict = model(**UpperCamelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase : List[str] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase : Tuple = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase__ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
print("""Successfully saved processor and model to""" , UpperCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(UpperCamelCase__ , organization="""nielsr""" )
model.push_to_hub(UpperCamelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
__lowerCamelCase :int = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 222
| 0
|
import random
class a :
"""simple docstring"""
@staticmethod
def __snake_case ( lowerCamelCase : str ) -> tuple[list[int], list[int]]:
__snake_case : int = [ord(lowerCamelCase ) for i in text]
__snake_case : int = []
__snake_case : str = []
for i in plain:
__snake_case : Union[str, Any] = random.randint(1 , 300 )
__snake_case : Dict = (i + k) * k
cipher.append(lowerCamelCase )
key.append(lowerCamelCase )
return cipher, key
@staticmethod
def __snake_case ( lowerCamelCase : list[int] , lowerCamelCase : list[int] ) -> str:
__snake_case : List[Any] = []
for i in range(len(lowerCamelCase ) ):
__snake_case : int = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(lowerCamelCase ) )
return "".join(lowerCamelCase )
if __name__ == "__main__":
_snake_case , _snake_case : int = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 203
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = LDMTextToImagePipeline
__UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
__UpperCAmelCase : int = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
__UpperCAmelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase : int = False
def __snake_case ( self : Union[str, Any] ) -> str:
torch.manual_seed(0 )
__snake_case : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__snake_case : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , )
torch.manual_seed(0 )
__snake_case : Dict = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__snake_case : Any = CLIPTextModel(lowerCamelCase )
__snake_case : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__snake_case : List[str] = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def __snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Any=0 ) -> str:
if str(lowerCamelCase ).startswith("mps" ):
__snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase )
else:
__snake_case : str = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__snake_case : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __snake_case ( self : Dict ) -> Optional[Any]:
__snake_case : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case : Union[str, Any] = self.get_dummy_components()
__snake_case : int = LDMTextToImagePipeline(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Optional[Any] = self.get_dummy_inputs(lowerCamelCase )
__snake_case : Optional[Any] = pipe(**lowerCamelCase ).images
__snake_case : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__snake_case : int = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : int ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : Any=torch.floataa , lowerCamelCase : List[Any]=0 ) -> Dict:
__snake_case : List[Any] = torch.manual_seed(lowerCamelCase )
__snake_case : Tuple = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 32, 32) )
__snake_case : int = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
__snake_case : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __snake_case ( self : Tuple ) -> Optional[Any]:
__snake_case : str = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : List[str] = self.get_inputs(lowerCamelCase )
__snake_case : str = pipe(**lowerCamelCase ).images
__snake_case : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__snake_case : int = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] )
__snake_case : List[str] = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : List[Any] ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Any , lowerCamelCase : Dict , lowerCamelCase : Any=torch.floataa , lowerCamelCase : List[Any]=0 ) -> Optional[Any]:
__snake_case : int = torch.manual_seed(lowerCamelCase )
__snake_case : Tuple = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 32, 32) )
__snake_case : Optional[Any] = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
__snake_case : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __snake_case ( self : List[str] ) -> int:
__snake_case : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Optional[Any] = self.get_inputs(lowerCamelCase )
__snake_case : str = pipe(**lowerCamelCase ).images[0]
__snake_case : Tuple = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
__snake_case : str = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 203
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] ):
super().__init__()
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
@torch.no_grad()
def __call__( self :Optional[int] , lowerCamelCase__ :int = 1 , lowerCamelCase__ :int = 1_00 , lowerCamelCase__ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ :Optional[float] = None , lowerCamelCase__ :bool = True , ):
if audio_length_in_s is None:
UpperCamelCase__ :List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCamelCase__ :Tuple = audio_length_in_s * self.unet.config.sample_rate
UpperCamelCase__ :str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCamelCase__ :Tuple = int(lowerCamelCase__ )
if sample_size % down_scale_factor != 0:
UpperCamelCase__ :Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
""" process.""" )
UpperCamelCase__ :Dict = int(lowerCamelCase__ )
UpperCamelCase__ :Any = next(iter(self.unet.parameters() ) ).dtype
UpperCamelCase__ :Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCamelCase__ :Union[str, Any] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device , dtype=lowerCamelCase__ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase__ , device=audio.device )
UpperCamelCase__ :List[Any] = self.scheduler.timesteps.to(lowerCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase__ :Tuple = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCamelCase__ :str = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
UpperCamelCase__ :Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCamelCase__ :Optional[int] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase__ )
| 45
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ ( self : Dict ):
_a = 1
_a = 3
_a = (32, 32)
_a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def UpperCamelCase__ ( self : Dict ):
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def UpperCamelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(__a )
@property
def UpperCamelCase__ ( self : str ):
def extract(*__a : Tuple , **__a : str ):
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict ):
_a = torch.ones([0] )
def UpperCamelCase__ ( self : List[str] , __a : Dict ):
self.pixel_values.to(__a )
return self
return Out()
return extract
def UpperCamelCase__ ( self : Optional[int] ):
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet
_a = PNDMScheduler(skip_prk_steps=__a )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_a = 77
_a = self.dummy_image.to(__a )
_a = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_a = AltDiffusionImgaImgPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
_a = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
_a = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
_a = "A painting of a squirrel eating a burger"
_a = torch.Generator(device=__a ).manual_seed(0 )
_a = alt_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__a , )
_a = output.images
_a = torch.Generator(device=__a ).manual_seed(0 )
_a = alt_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__a , return_dict=__a , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.dummy_cond_unet
_a = PNDMScheduler(skip_prk_steps=__a )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_a = 77
_a = self.dummy_image.to(__a )
# put models in fp16
_a = unet.half()
_a = vae.half()
_a = bert.half()
# make sure here that pndm scheduler skips prk
_a = AltDiffusionImgaImgPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
_a = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
_a = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
_a = "A painting of a squirrel eating a burger"
_a = torch.manual_seed(0 )
_a = alt_pipe(
[prompt] , generator=__a , num_inference_steps=2 , output_type="np" , image=__a , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_a = init_image.resize((7_60, 5_04) )
_a = "BAAI/AltDiffusion"
_a = AltDiffusionImgaImgPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
_a = "A fantasy landscape, trending on artstation"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type="np" , )
_a = output.images[0]
_a = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_a = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
_a = init_image.resize((7_68, 5_12) )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
_a = "BAAI/AltDiffusion"
_a = AltDiffusionImgaImgPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
_a = "A fantasy landscape, trending on artstation"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type="np" , )
_a = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 692
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCAmelCase ( lowercase_ ):
a = '''informer'''
a = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , a__ = None , a__ = None , a__ = "student_t" , a__ = "nll" , a__ = 1 , a__ = None , a__ = "mean" , a__ = 0 , a__ = 0 , a__ = 0 , a__ = 0 , a__ = None , a__ = None , a__ = 64 , a__ = 32 , a__ = 32 , a__ = 2 , a__ = 2 , a__ = 2 , a__ = 2 , a__ = True , a__ = "gelu" , a__ = 0.05 , a__ = 0.1 , a__ = 0.1 , a__ = 0.1 , a__ = 0.1 , a__ = 100 , a__ = 0.02 , a__=True , a__ = "prob" , a__ = 5 , a__ = True , **a__ , ):
A_ : List[Any] = prediction_length
A_ : Tuple = context_length or prediction_length
A_ : Dict = distribution_output
A_ : Optional[Any] = loss
A_ : Optional[Any] = input_size
A_ : Dict = num_time_features
A_ : Dict = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A_ : Optional[int] = scaling
A_ : List[Any] = num_dynamic_real_features
A_ : str = num_static_real_features
A_ : Union[str, Any] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCamelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A_ : Optional[Any] = cardinality
else:
A_ : Any = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCamelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A_ : List[str] = embedding_dimension
else:
A_ : Union[str, Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
A_ : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features
A_ : str = d_model
A_ : Union[str, Any] = encoder_attention_heads
A_ : List[Any] = decoder_attention_heads
A_ : int = encoder_ffn_dim
A_ : List[Any] = decoder_ffn_dim
A_ : Optional[Any] = encoder_layers
A_ : str = decoder_layers
A_ : List[Any] = dropout
A_ : Tuple = attention_dropout
A_ : Any = activation_dropout
A_ : Dict = encoder_layerdrop
A_ : Tuple = decoder_layerdrop
A_ : Tuple = activation_function
A_ : Any = init_std
A_ : Dict = use_cache
# Informer
A_ : List[Any] = attention_type
A_ : Tuple = sampling_factor
A_ : Any = distil
super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ )
@property
def _lowerCamelCase ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 713
|
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ : Union[str, Any] = [0] * len(_lowerCAmelCase )
A_ : Optional[int] = []
A_ : str = []
A_ : Dict = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_lowerCAmelCase ) ):
if indegree[i] == 0:
queue.append(_lowerCAmelCase )
while queue:
A_ : List[str] = queue.pop(0 )
cnt += 1
topo.append(_lowerCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_lowerCAmelCase )
if cnt != len(_lowerCAmelCase ):
print("""Cycle exists""" )
else:
print(_lowerCAmelCase )
# Adjacency List of Graph
_lowerCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 481
| 0
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase__ : Any = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : Optional[Any] ) -> str:
inspect_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__A : Optional[Any] = path + '.py'
assert script_name in os.listdir(SCREAMING_SNAKE_CASE_ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE_ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : int ) -> Dict:
inspect_metric(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__A : Optional[int] = path + '.py'
assert script_name in os.listdir(SCREAMING_SNAKE_CASE_ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[int] ) -> List[Any]:
__A : List[str] = get_dataset_config_info(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> List[str]:
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
get_dataset_config_info(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] ) -> int:
__A : Dict = get_dataset_config_names(SCREAMING_SNAKE_CASE_ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( __snake_case : str , __snake_case : Dict , __snake_case : List[Any] ) -> str:
__A : List[str] = get_dataset_infos(SCREAMING_SNAKE_CASE_ )
assert list(infos.keys() ) == expected_configs
__A : List[Any] = expected_configs[0]
assert expected_config in infos
__A : List[str] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] ) -> List[str]:
__A : Any = get_dataset_infos(SCREAMING_SNAKE_CASE_ )
assert expected_config in infos
__A : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any:
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
get_dataset_split_names(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
| 8
|
from ...configuration_utils import PretrainedConfig
lowerCAmelCase__ = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'tapas'
def __init__( self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=1024 , lowercase=[3, 256, 256, 2, 256, 256, 10] , lowercase=0.02 , lowercase=1e-12 , lowercase=0 , lowercase=10.0 , lowercase=0 , lowercase=1.0 , lowercase=None , lowercase=1.0 , lowercase=False , lowercase=None , lowercase=1.0 , lowercase=1.0 , lowercase=False , lowercase=False , lowercase="ratio" , lowercase=None , lowercase=None , lowercase=64 , lowercase=32 , lowercase=False , lowercase=True , lowercase=False , lowercase=False , lowercase=True , lowercase=False , lowercase=None , lowercase=None , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , **lowercase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_sizes
A__ = initializer_range
A__ = layer_norm_eps
# Fine-tuning task hyperparameters
A__ = positive_label_weight
A__ = num_aggregation_labels
A__ = aggregation_loss_weight
A__ = use_answer_as_supervision
A__ = answer_loss_importance
A__ = use_normalized_answer_loss
A__ = huber_loss_delta
A__ = temperature
A__ = aggregation_temperature
A__ = use_gumbel_for_cells
A__ = use_gumbel_for_aggregation
A__ = average_approximation_function
A__ = cell_selection_preference
A__ = answer_loss_cutoff
A__ = max_num_rows
A__ = max_num_columns
A__ = average_logits_per_cell
A__ = select_one_column
A__ = allow_empty_column_selection
A__ = init_cell_selection_weights_to_zero
A__ = reset_position_index_per_cell
A__ = disable_per_token_loss
# Aggregation hyperparameters
A__ = aggregation_labels
A__ = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowercase ):
A__ = {int(lowercase ): v for k, v in aggregation_labels.items()}
| 514
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __A(lowerCAmelCase ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_UpperCamelCase = [1_4_4, 1_9_2, 2_4_0]
_UpperCamelCase = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0]
elif "mobilevit_xs" in mobilevit_name:
_UpperCamelCase = [9_6, 1_2_0, 1_4_4]
_UpperCamelCase = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4]
elif "mobilevit_xxs" in mobilevit_name:
_UpperCamelCase = [6_4, 8_0, 9_6]
_UpperCamelCase = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0]
_UpperCamelCase = 0.05
_UpperCamelCase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
_UpperCamelCase = 5_1_2
_UpperCamelCase = 1_6
_UpperCamelCase = 2_1
_UpperCamelCase = """pascal-voc-id2label.json"""
else:
_UpperCamelCase = 1_0_0_0
_UpperCamelCase = """imagenet-1k-id2label.json"""
_UpperCamelCase = """huggingface/label-files"""
_UpperCamelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
_UpperCamelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
return config
def __A(lowerCAmelCase , lowerCAmelCase=False ) -> List[Any]:
"""simple docstring"""
for i in range(1 , 6 ):
if F'layer_{i}.' in name:
_UpperCamelCase = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.' )
if "conv_1." in name:
_UpperCamelCase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
_UpperCamelCase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
_UpperCamelCase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
_UpperCamelCase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
_UpperCamelCase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
_UpperCamelCase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
_UpperCamelCase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
_UpperCamelCase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
_UpperCamelCase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
_UpperCamelCase = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
_UpperCamelCase = name.replace(F'.{i}.{j}.' , F'.{i}.' )
if "expand_1x1" in name:
_UpperCamelCase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
_UpperCamelCase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
_UpperCamelCase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F'.global_rep.{i}.weight' in name:
_UpperCamelCase = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""" )
if F'.global_rep.{i}.bias' in name:
_UpperCamelCase = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""" )
if ".global_rep." in name:
_UpperCamelCase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
_UpperCamelCase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
_UpperCamelCase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
_UpperCamelCase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
_UpperCamelCase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
_UpperCamelCase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
_UpperCamelCase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
_UpperCamelCase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
_UpperCamelCase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
_UpperCamelCase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
_UpperCamelCase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
_UpperCamelCase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
_UpperCamelCase = """mobilevit.""" + name
return name
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> str:
"""simple docstring"""
if base_model:
_UpperCamelCase = """"""
else:
_UpperCamelCase = """mobilevit."""
for key in orig_state_dict.copy().keys():
_UpperCamelCase = orig_state_dict.pop(lowerCAmelCase )
if key[:8] == "encoder.":
_UpperCamelCase = key[8:]
if "qkv" in key:
_UpperCamelCase = key.split(""".""" )
_UpperCamelCase = int(key_split[0][6:] ) - 1
_UpperCamelCase = int(key_split[3] )
_UpperCamelCase = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' )
_UpperCamelCase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_UpperCamelCase = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
_UpperCamelCase = val[:dim, :]
_UpperCamelCase = val[dim : dim * 2, :]
_UpperCamelCase = val[-dim:, :]
else:
_UpperCamelCase = val[:dim]
_UpperCamelCase = val[dim : dim * 2]
_UpperCamelCase = val[-dim:]
else:
_UpperCamelCase = val
return orig_state_dict
def __A() -> Dict:
"""simple docstring"""
_UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCamelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = get_mobilevit_config(lowerCAmelCase )
# load original state_dict
_UpperCamelCase = torch.load(lowerCAmelCase , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
_UpperCamelCase = MobileViTForSemanticSegmentation(lowerCAmelCase ).eval()
else:
_UpperCamelCase = MobileViTForImageClassification(lowerCAmelCase ).eval()
_UpperCamelCase = convert_state_dict(lowerCAmelCase , lowerCAmelCase )
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
_UpperCamelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
_UpperCamelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
_UpperCamelCase = model(**lowerCAmelCase )
_UpperCamelCase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 2_1, 3_2, 3_2)
if mobilevit_name == "deeplabv3_mobilevit_s":
_UpperCamelCase = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_UpperCamelCase = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_UpperCamelCase = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase , atol=1e-4 )
else:
assert logits.shape == (1, 1_0_0_0)
if mobilevit_name == "mobilevit_s":
_UpperCamelCase = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
_UpperCamelCase = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
_UpperCamelCase = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3] , lowerCAmelCase , atol=1e-4 )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase )
if push_to_hub:
_UpperCamelCase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
_UpperCamelCase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCAmelCase , organization="""apple""" )
model.push_to_hub(lowerCAmelCase , organization="""apple""" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCamelCase__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 710
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=1_28 , a=32 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def A_ ( self ) -> int:
'''simple docstring'''
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = self.prepare_config_and_inputs()
_UpperCamelCase = True
_UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A_ ( self , a , a , a , a , a , a , a ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = NezhaModel(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(a , attention_mask=a , token_type_ids=a )
_UpperCamelCase = model(a , token_type_ids=a )
_UpperCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , a , a , a , a , a , a , a , a , a , ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = True
_UpperCamelCase = NezhaModel(a )
model.to(a )
model.eval()
_UpperCamelCase = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , encoder_attention_mask=a , )
_UpperCamelCase = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , )
_UpperCamelCase = model(a , attention_mask=a , token_type_ids=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , a , a , a , a , a , a , a ) -> Dict:
'''simple docstring'''
_UpperCamelCase = NezhaForMaskedLM(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , a , a , a , a , a , a , a ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = NezhaForNextSentencePrediction(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A_ ( self , a , a , a , a , a , a , a ) -> Dict:
'''simple docstring'''
_UpperCamelCase = NezhaForPreTraining(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(
a , attention_mask=a , token_type_ids=a , labels=a , next_sentence_label=a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A_ ( self , a , a , a , a , a , a , a ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = NezhaForQuestionAnswering(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , a , a , a , a , a , a , a ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = NezhaForSequenceClassification(a )
model.to(a )
model.eval()
_UpperCamelCase = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , a , a , a , a , a , a , a ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = NezhaForTokenClassification(config=a )
model.to(a )
model.eval()
_UpperCamelCase = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , a , a , a , a , a , a , a ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = NezhaForMultipleChoice(config=a )
model.to(a )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
UpperCamelCase_ : int = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Dict = True
def A_ ( self , a , a , a=False ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class in get_values(a ):
_UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = NezhaModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=a , hidden_size=37 )
def A_ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a )
def A_ ( self ) -> Dict:
'''simple docstring'''
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
_UpperCamelCase = None
self.model_tester.create_and_check_model_as_decoder(
a , a , a , a , a , a , a , a , a , )
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a )
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*a )
def A_ ( self ) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def A_ ( self ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@slow
def A_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = NezhaModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=a )
_UpperCamelCase = self._prepare_for_class(a , a )
_UpperCamelCase = torch.jit.trace(
a , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , """bert.pt""" ) )
_UpperCamelCase = torch.jit.load(os.path.join(a , """bert.pt""" ) , map_location=a )
loaded(inputs_dict["""input_ids"""].to(a ) , inputs_dict["""attention_mask"""].to(a ) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
_UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCamelCase = model(a , attention_mask=a )[0]
_UpperCamelCase = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , a )
_UpperCamelCase = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
@slow
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
_UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_UpperCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCamelCase = model(a , attention_mask=a )[0]
_UpperCamelCase = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape , a )
_UpperCamelCase = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 202
| 0
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCamelCase__ ( a : Dict ) -> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowerCamelCase__ ( a : Dict ) -> Tuple:
"""simple docstring"""
class lowerCAmelCase_ :
def __init__( self : int , __A : int ) ->Dict:
"""simple docstring"""
a__ :Dict = metric_id
class lowerCAmelCase_ :
lowerCamelCase_ = [MetricMock(_a) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def _snake_case ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowerCamelCase__ ( a : int , a : List[str] , a : List[Any] , a : Tuple , a : List[str] ) -> List[str]:
"""simple docstring"""
if "tmp_path" in args:
a__ :Optional[int] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(a , match="https://huggingface.co/docs/evaluate" ):
func(*a )
| 395
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 395
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = ['''image_processor''', '''tokenizer''']
_snake_case = '''BridgeTowerImageProcessor'''
_snake_case = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , snake_case_ , snake_case_ ) -> List[Any]:
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ) -> BatchEncoding:
__lowerCAmelCase = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel_values + pixel_mask
__lowerCAmelCase = self.image_processor(
snake_case_ , return_tensors=snake_case_ , do_normalize=snake_case_ , do_center_crop=snake_case_ , **snake_case_ )
encoding.update(snake_case_ )
return encoding
def A__ ( self , *snake_case_ , **snake_case_ ) -> Optional[int]:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def A__ ( self , *snake_case_ , **snake_case_ ) -> List[str]:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def A__ ( self ) -> List[str]:
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 573
|
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , **snake_case_ ) -> Optional[Any]:
super().__init__(**snake_case_ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(snake_case_ )
def A__ ( self , **snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = {}
__lowerCAmelCase = {}
__lowerCAmelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
__lowerCAmelCase = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
__lowerCAmelCase = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
__lowerCAmelCase = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
__lowerCAmelCase = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
__lowerCAmelCase = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
__lowerCAmelCase = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
__lowerCAmelCase = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
__lowerCAmelCase = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
__lowerCAmelCase = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
__lowerCAmelCase = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
__lowerCAmelCase = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
__lowerCAmelCase = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , snake_case_ , *snake_case_ , snake_case_=None , snake_case_=None , **snake_case_ ) -> Union[str, Any]:
return super().__call__(snake_case_ , *snake_case_ , num_workers=snake_case_ , batch_size=snake_case_ , **snake_case_ )
def A__ ( self , snake_case_ , snake_case_=64 , snake_case_ = 0 , snake_case_ = 512 / 1_500 , snake_case_ = 32 , snake_case_ = 1 , ) -> Optional[int]:
__lowerCAmelCase = load_image(snake_case_ )
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.generate_crop_boxes(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__lowerCAmelCase = self.image_processor(images=snake_case_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
__lowerCAmelCase = self.get_inference_context()
with inference_context():
__lowerCAmelCase = self._ensure_tensor_on_device(snake_case_ , device=self.device )
__lowerCAmelCase = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
__lowerCAmelCase = image_embeddings
__lowerCAmelCase = grid_points.shape[1]
__lowerCAmelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , snake_case_ , snake_case_ ):
__lowerCAmelCase = grid_points[:, i : i + points_per_batch, :, :]
__lowerCAmelCase = input_labels[:, i : i + points_per_batch]
__lowerCAmelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def A__ ( self , snake_case_ , snake_case_=0.88 , snake_case_=0.95 , snake_case_=0 , snake_case_=1 , ) -> Dict:
__lowerCAmelCase = model_inputs.pop("""input_boxes""" )
__lowerCAmelCase = model_inputs.pop("""is_last""" )
__lowerCAmelCase = model_inputs.pop("""original_sizes""" ).tolist()
__lowerCAmelCase = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
__lowerCAmelCase = self.model(**snake_case_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__lowerCAmelCase = model_outputs["""pred_masks"""]
__lowerCAmelCase = self.image_processor.post_process_masks(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , binarize=snake_case_ )
__lowerCAmelCase = model_outputs["""iou_scores"""]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def A__ ( self , snake_case_ , snake_case_=False , snake_case_=False , snake_case_=0.7 , ) -> str:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
__lowerCAmelCase = torch.cat(snake_case_ )
__lowerCAmelCase = torch.cat(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.post_process_for_mask_generation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__lowerCAmelCase = defaultdict(snake_case_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(snake_case_ )
__lowerCAmelCase = {}
if output_rle_mask:
__lowerCAmelCase = rle_mask
if output_bboxes_mask:
__lowerCAmelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 573
| 1
|
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> float:
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> float:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> float:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> float:
'''simple docstring'''
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( snake_case_ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'LayoutLMv3ImageProcessor'
lowercase = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : List[Any] , snake_case : int=None , snake_case : str=None , **snake_case : int ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case , )
UpperCamelCase_ : Optional[Any] = kwargs.pop('feature_extractor' )
UpperCamelCase_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case , snake_case )
def __call__( self : Optional[Any] , snake_case : List[Any] , snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , snake_case : Union[List[List[int]], List[List[List[int]]]] = None , snake_case : Optional[Union[List[int], List[List[int]]]] = None , snake_case : bool = True , snake_case : Union[bool, str, PaddingStrategy] = False , snake_case : Union[bool, str, TruncationStrategy] = None , snake_case : Optional[int] = None , snake_case : int = 0 , snake_case : Optional[int] = None , snake_case : Optional[bool] = None , snake_case : Optional[bool] = None , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = True , snake_case : Optional[Union[str, TensorType]] = None , **snake_case : Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
UpperCamelCase_ : Optional[int] = self.image_processor(images=snake_case , return_tensors=snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(snake_case , snake_case ):
UpperCamelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCamelCase_ : str = features['words']
UpperCamelCase_ : int = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_token_type_ids=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , )
# add pixel values
UpperCamelCase_ : int = features.pop('pixel_values' )
if return_overflowing_tokens is True:
UpperCamelCase_ : Optional[Any] = self.get_overflowing_images(snake_case , encoded_inputs['overflow_to_sample_mapping'] )
UpperCamelCase_ : List[str] = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Any , snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(snake_case ) != len(snake_case ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f" {len(snake_case )} and {len(snake_case )}" )
return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self : List[str] , *snake_case : Dict , **snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , *snake_case : Optional[int] , **snake_case : int ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*snake_case , **snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case , )
return self.image_processor
| 417
| 0
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class _UpperCamelCase( SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] , *_lowerCamelCase : int , **_lowerCamelCase : Tuple ):
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead." , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
| 721
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__lowerCamelCase = logging.get_logger(__name__)
class _UpperCamelCase( SCREAMING_SNAKE_CASE ):
__A: List[str] = ["""input_features""", """attention_mask"""]
def __init__( self : Optional[int] , _lowerCamelCase : int=80 , _lowerCamelCase : int=1_60_00 , _lowerCamelCase : Optional[Any]=0.0 , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : Union[str, Any]=25 , _lowerCamelCase : int="hamming_window" , _lowerCamelCase : List[Any]=3_27_68.0 , _lowerCamelCase : List[Any]=0.97 , _lowerCamelCase : Optional[Any]=1.0 , _lowerCamelCase : Dict=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : Union[str, Any]=False , **_lowerCamelCase : Optional[Any] , ):
super().__init__(feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , **_lowerCamelCase )
_UpperCAmelCase : Tuple = feature_size
_UpperCAmelCase : Optional[int] = sampling_rate
_UpperCAmelCase : Tuple = padding_value
_UpperCAmelCase : List[Any] = hop_length
_UpperCAmelCase : Union[str, Any] = win_length
_UpperCAmelCase : str = frame_signal_scale
_UpperCAmelCase : Optional[Any] = preemphasis_coeff
_UpperCAmelCase : Optional[Any] = mel_floor
_UpperCAmelCase : Optional[Any] = normalize_means
_UpperCAmelCase : Optional[int] = normalize_vars
_UpperCAmelCase : Dict = win_function
_UpperCAmelCase : List[str] = return_attention_mask
_UpperCAmelCase : Tuple = win_length * sampling_rate // 10_00
_UpperCAmelCase : Union[str, Any] = hop_length * sampling_rate // 10_00
_UpperCAmelCase : Union[str, Any] = optimal_fft_length(self.sample_size )
_UpperCAmelCase : Optional[Any] = (self.n_fft // 2) + 1
def a__ ( self : Any , _lowerCamelCase : np.array ):
if self.win_function == "hamming_window":
_UpperCAmelCase : int = window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCamelCase )
else:
_UpperCAmelCase : str = window_function(window_length=self.sample_size , name=self.win_function )
_UpperCAmelCase : List[str] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
_UpperCAmelCase : Dict = spectrogram(
one_waveform * self.frame_signal_scale , window=_lowerCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_lowerCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=_lowerCamelCase , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def a__ ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] ):
# make sure we normalize float32 arrays
if self.normalize_means:
_UpperCAmelCase : Union[str, Any] = x[:input_length].mean(axis=0 )
_UpperCAmelCase : List[Any] = np.subtract(_lowerCamelCase , _lowerCamelCase )
if self.normalize_vars:
_UpperCAmelCase : Optional[int] = x[:input_length].std(axis=0 )
_UpperCAmelCase : Any = np.divide(_lowerCamelCase , _lowerCamelCase )
if input_length < x.shape[0]:
_UpperCAmelCase : Tuple = padding_value
# make sure array is in float32
_UpperCAmelCase : int = x.astype(np.floataa )
return x
def a__ ( self : Any , _lowerCamelCase : List[np.ndarray] , _lowerCamelCase : Optional[np.ndarray] = None ):
_UpperCAmelCase : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_lowerCamelCase , _lowerCamelCase , self.padding_value ) for x, n in zip(_lowerCamelCase , _lowerCamelCase )]
def __call__( self : List[Any] , _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _lowerCamelCase : Union[bool, str, PaddingStrategy] = False , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , _lowerCamelCase : Optional[int] = None , **_lowerCamelCase : str , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : Optional[Any] = isinstance(_lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : int = is_batched_numpy or (
isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : List[str] = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ):
_UpperCAmelCase : int = np.asarray(_lowerCamelCase , dtype=np.floataa )
elif isinstance(_lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : List[Any] = [raw_speech]
# extract fbank features
_UpperCAmelCase : Dict = [self._extract_mfsc_features(_lowerCamelCase ) for one_waveform in raw_speech]
# convert into correct format for padding
_UpperCAmelCase : List[str] = BatchFeature({"input_features": features} )
_UpperCAmelCase : Tuple = self.pad(
_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
# make sure list is in array format
_UpperCAmelCase : Dict = padded_inputs.get("input_features" )
if isinstance(input_features[0] , _lowerCamelCase ):
_UpperCAmelCase : Tuple = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for feature in input_features]
_UpperCAmelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_UpperCAmelCase : Optional[Any] = [np.asarray(_lowerCamelCase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_UpperCAmelCase : Union[str, Any] = (
np.array(_lowerCamelCase , dtype=np.intaa )
if self._get_padding_strategies(_lowerCamelCase , max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_UpperCAmelCase : int = self.normalize(
padded_inputs["input_features"] , attention_mask=_lowerCamelCase )
if return_tensors is not None:
_UpperCAmelCase : Optional[int] = padded_inputs.convert_to_tensors(_lowerCamelCase )
return padded_inputs
| 328
| 0
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1024 ):
__a , __a : str = [], []
__a : str = list(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
__a , __a : List[str] = sorted_examples[0]
def is_too_big(SCREAMING_SNAKE_CASE__ ):
return tok(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
__a : Tuple = new_src + ' ' + src
__a : int = new_tgt + ' ' + tgt
if is_too_big(SCREAMING_SNAKE_CASE__ ) or is_too_big(SCREAMING_SNAKE_CASE__ ): # cant fit, finalize example
finished_src.append(SCREAMING_SNAKE_CASE__ )
finished_tgt.append(SCREAMING_SNAKE_CASE__ )
__a , __a : str = src, tgt
else: # can fit, keep adding
__a , __a : Tuple = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(SCREAMING_SNAKE_CASE__ )
finished_tgt.append(SCREAMING_SNAKE_CASE__ )
return finished_src, finished_tgt
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : List[Any] = Path(SCREAMING_SNAKE_CASE__ )
save_path.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
for split in ["train"]:
__a , __a : Optional[int] = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
__a : List[Any] = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE__ ).open().readlines()]
__a : Dict = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE__ ).open().readlines()]
__a , __a : List[Any] = pack_examples(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f'''packed {split} split from {len(SCREAMING_SNAKE_CASE__ )} examples -> {len(SCREAMING_SNAKE_CASE__ )}.''' )
Path(save_path / f'''{split}.source''' ).open('w' ).write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
Path(save_path / f'''{split}.target''' ).open('w' ).write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
for split in ["val", "test"]:
__a , __a : Dict = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
shutil.copyfile(SCREAMING_SNAKE_CASE__ , save_path / f'''{split}.source''' )
shutil.copyfile(SCREAMING_SNAKE_CASE__ , save_path / f'''{split}.target''' )
def lowerCAmelCase__ ( ):
__a : Any = argparse.ArgumentParser()
parser.add_argument('--tok_name' , type=SCREAMING_SNAKE_CASE__ , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('--max_seq_len' , type=SCREAMING_SNAKE_CASE__ , default=128 )
parser.add_argument('--data_dir' , type=SCREAMING_SNAKE_CASE__ )
parser.add_argument('--save_path' , type=SCREAMING_SNAKE_CASE__ )
__a : int = parser.parse_args()
__a : Optional[Any] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(SCREAMING_SNAKE_CASE__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 597
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE_ = False
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self ) -> List[Any]:
__a : Any = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__a : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__a : Optional[Any] = torch.manual_seed(0 )
__a : Any = pipe(
image=_A , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__a : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a : Tuple = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 597
| 1
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
lowercase = [x.strip() for x in open(a__ ).readlines()]
lowercase = [x.strip() for x in open(a__ ).readlines()][: len(a__ )]
lowercase = calculate_rouge(a__ , a__ , **a__ )
if save_path is not None:
save_json(a__ , a__ , indent=a__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 716
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
__lowerCamelCase : Optional[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
__lowerCamelCase : Any = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
__lowerCamelCase : Union[str, Any] = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def UpperCAmelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def UpperCAmelCase__ (self : List[str] , A__ : List[Any] , A__ : List[str] ) -> str:
lowercase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
lowercase = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
lowercase = evaluate(dataset=A__ , predictions=A__ )
return score
| 459
| 0
|
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowerCAmelCase : Any = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Optional[Any]:
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
| 46
|
'''simple docstring'''
import math
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
_A = []
_A = 2
_A = int(math.sqrt(__snake_case ) ) # Size of every segment
_A = [True] * (end + 1)
_A = []
while start <= end:
if temp[start] is True:
in_prime.append(__snake_case )
for i in range(start * start , end + 1 , __snake_case ):
_A = False
start += 1
prime += in_prime
_A = end + 1
_A = min(2 * end , __snake_case )
while low <= n:
_A = [True] * (high - low + 1)
for each in in_prime:
_A = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__snake_case , high + 1 , __snake_case ):
_A = False
for j in range(len(__snake_case ) ):
if temp[j] is True:
prime.append(j + low )
_A = high + 1
_A = min(high + end , __snake_case )
return prime
print(sieve(10**6))
| 107
| 0
|
"""simple docstring"""
class A_ :
def __init__( self: Optional[Any] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Dict = val
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : str = None
def _lowercase ( self: Tuple ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
_lowerCamelCase : int = Node(__lowerCAmelCase )
else:
self.left.insert(__lowerCAmelCase )
elif val > self.val:
if self.right is None:
_lowerCamelCase : Optional[Any] = Node(__lowerCAmelCase )
else:
self.right.insert(__lowerCAmelCase )
else:
_lowerCamelCase : Optional[Any] = val
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if root:
inorder(root.left , _lowerCamelCase )
res.append(root.val )
inorder(root.right , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
return arr
_lowerCamelCase : Any = Node(arr[0] )
for i in range(1 , len(_lowerCamelCase ) ):
root.insert(arr[i] )
# Traverse BST in order.
_lowerCamelCase : int = []
inorder(_lowerCamelCase , _lowerCamelCase )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 386
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase : int = logging.get_logger(__name__)
class A_ ( _a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self: List[str] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Union[int, float] = 1 / 255 ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: bool = True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : int = size if size is not None else {"height": 384, "width": 384}
_lowerCamelCase : Optional[int] = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : str = do_resize
_lowerCamelCase : List[str] = size
_lowerCamelCase : Optional[Any] = resample
_lowerCamelCase : str = do_rescale
_lowerCamelCase : int = rescale_factor
_lowerCamelCase : int = do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCamelCase : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCamelCase : Optional[Any] = do_convert_rgb
def _lowercase ( self: Dict ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Dict[str, int] ,__lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: Optional[Any] ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_lowerCamelCase : Dict = (size["height"], size["width"])
return resize(__lowerCAmelCase ,size=__lowerCAmelCase ,resample=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Union[int, float] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: List[str] ,):
'''simple docstring'''
return rescale(__lowerCAmelCase ,scale=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Union[float, List[float]] ,__lowerCAmelCase: Union[float, List[float]] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
return normalize(__lowerCAmelCase ,mean=__lowerCAmelCase ,std=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: ImageInput ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[Dict[str, int]] = None ,__lowerCAmelCase: PILImageResampling = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[float] = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[str, TensorType]] = None ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST ,**__lowerCAmelCase: Tuple ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase : Dict = size if size is not None else self.size
_lowerCamelCase : Tuple = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase : Union[str, Any] = [convert_to_rgb(__lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase : List[Any] = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
_lowerCamelCase : Tuple = [self.resize(image=__lowerCAmelCase ,size=__lowerCAmelCase ,resample=__lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCamelCase : Optional[int] = [self.rescale(image=__lowerCAmelCase ,scale=__lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCamelCase : Dict = [self.normalize(image=__lowerCAmelCase ,mean=__lowerCAmelCase ,std=__lowerCAmelCase ) for image in images]
_lowerCamelCase : Any = [to_channel_dimension_format(__lowerCAmelCase ,__lowerCAmelCase ) for image in images]
_lowerCamelCase : str = BatchFeature(data={"pixel_values": images} ,tensor_type=__lowerCAmelCase )
return encoded_outputs
| 386
| 1
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
lowerCamelCase : List[str] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 587
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = RoFormerTokenizer
_snake_case = RoFormerTokenizerFast
_snake_case = True
_snake_case = True
def UpperCAmelCase ( self ) -> int:
super().setUp()
def UpperCAmelCase ( self , **A ) -> Any:
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **A )
def UpperCAmelCase ( self , **A ) -> Union[str, Any]:
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **A )
def UpperCAmelCase ( self ) -> Any:
snake_case : int = """永和服装饰品有限公司,今天天气非常好"""
snake_case : Optional[int] = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase ( self ) -> int:
snake_case : Optional[Any] = self.get_tokenizer()
snake_case , snake_case : Union[str, Any] = self.get_chinese_input_output_texts()
snake_case : Any = tokenizer.tokenize(A )
self.assertListEqual(A , output_text.split() )
snake_case : List[Any] = tokens + [tokenizer.unk_token]
snake_case : Optional[int] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Union[str, Any] = self.get_rust_tokenizer()
snake_case , snake_case : int = self.get_chinese_input_output_texts()
snake_case : Dict = tokenizer.tokenize(A )
self.assertListEqual(A , output_text.split() )
snake_case : List[Any] = tokens + [tokenizer.unk_token]
snake_case : Any = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> Dict:
pass
| 587
| 1
|
"""simple docstring"""
from copy import deepcopy
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None ) -> None:
if arr is None and size is not None:
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = [0] * size
elif arr is not None:
self.init(lowerCAmelCase__ )
else:
raise ValueError('Either arr or size must be specified' )
def __A ( self , lowerCAmelCase__ ) -> None:
SCREAMING_SNAKE_CASE = len(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = deepcopy(lowerCAmelCase__ )
for i in range(1 , self.size ):
SCREAMING_SNAKE_CASE = self.next_(lowerCAmelCase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __A ( self ) -> list[int]:
SCREAMING_SNAKE_CASE = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE = self.next_(lowerCAmelCase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __A ( lowerCAmelCase__ ) -> int:
return index + (index & (-index))
@staticmethod
def __A ( lowerCAmelCase__ ) -> int:
return index - (index & (-index))
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
SCREAMING_SNAKE_CASE = self.next_(lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
self.add(lowerCAmelCase__ , value - self.get(lowerCAmelCase__ ) )
def __A ( self , lowerCAmelCase__ ) -> int:
if right == 0:
return 0
SCREAMING_SNAKE_CASE = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
SCREAMING_SNAKE_CASE = self.prev(lowerCAmelCase__ )
return result
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
return self.prefix(lowerCAmelCase__ ) - self.prefix(lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ ) -> int:
return self.query(lowerCAmelCase__ , index + 1 )
def __A ( self , lowerCAmelCase__ ) -> int:
value -= self.tree[0]
if value < 0:
return -1
SCREAMING_SNAKE_CASE = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
SCREAMING_SNAKE_CASE = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = """efficientformer"""
def __init__( self , lowerCAmelCase__ = [3, 2, 6, 4] , lowerCAmelCase__ = [48, 96, 224, 448] , lowerCAmelCase__ = [True, True, True, True] , lowerCAmelCase__ = 448 , lowerCAmelCase__ = 32 , lowerCAmelCase__ = 4 , lowerCAmelCase__ = 7 , lowerCAmelCase__ = 5 , lowerCAmelCase__ = 8 , lowerCAmelCase__ = 4 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 16 , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = 1e-5 , lowerCAmelCase__ = "gelu" , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 1e-12 , lowerCAmelCase__ = 224 , lowerCAmelCase__ = 1e-05 , **lowerCAmelCase__ , ) -> None:
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_expansion_ratio
SCREAMING_SNAKE_CASE = downsamples
SCREAMING_SNAKE_CASE = dim
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = resolution
SCREAMING_SNAKE_CASE = pool_size
SCREAMING_SNAKE_CASE = downsample_patch_size
SCREAMING_SNAKE_CASE = downsample_stride
SCREAMING_SNAKE_CASE = downsample_pad
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = num_metaad_blocks
SCREAMING_SNAKE_CASE = distillation
SCREAMING_SNAKE_CASE = use_layer_scale
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = batch_norm_eps
| 327
| 1
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __UpperCamelCase ( lowercase__ : Any ):
'''simple docstring'''
if is_torch_version('<', '2.0.0' ) or not hasattr(lowercase__, '_dynamo' ):
return False
return isinstance(lowercase__, torch._dynamo.eval_frame.OptimizedModule )
def __UpperCamelCase ( lowercase__ : int, lowercase__ : bool = True ):
'''simple docstring'''
__lowercase =(torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__lowercase =is_compiled_module(lowercase__ )
if is_compiled:
__lowercase =model
__lowercase =model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__, lowercase__ ):
__lowercase =model.module
if not keep_fpaa_wrapper:
__lowercase =getattr(lowercase__, 'forward' )
__lowercase =model.__dict__.pop('_original_forward', lowercase__ )
if original_forward is not None:
while hasattr(lowercase__, '__wrapped__' ):
__lowercase =forward.__wrapped__
if forward == original_forward:
break
__lowercase =forward
if getattr(lowercase__, '_converted_to_transformer_engine', lowercase__ ):
convert_model(lowercase__, to_transformer_engine=lowercase__ )
if is_compiled:
__lowercase =model
__lowercase =compiled_model
return model
def __UpperCamelCase ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def __UpperCamelCase ( lowercase__ : Any, lowercase__ : Optional[Any] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__, lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__, lowercase__ )
@contextmanager
def __UpperCamelCase ( **lowercase__ : int ):
'''simple docstring'''
for key, value in kwargs.items():
__lowercase =str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __UpperCamelCase ( lowercase__ : List[Any] ):
'''simple docstring'''
if not hasattr(lowercase__, '__qualname__' ) and not hasattr(lowercase__, '__name__' ):
__lowercase =getattr(lowercase__, '__class__', lowercase__ )
if hasattr(lowercase__, '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__, '__name__' ):
return obj.__name__
return str(lowercase__ )
def __UpperCamelCase ( lowercase__ : Union[str, Any], lowercase__ : int ):
'''simple docstring'''
for key, value in source.items():
if isinstance(lowercase__, lowercase__ ):
__lowercase =destination.setdefault(lowercase__, {} )
merge_dicts(lowercase__, lowercase__ )
else:
__lowercase =value
return destination
def __UpperCamelCase ( lowercase__ : int = None ):
'''simple docstring'''
if port is None:
__lowercase =2_95_00
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 119
|
"""simple docstring"""
def __magic_name__ ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float , ) -> float:
a__ = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('All input parameters must be positive' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
a__ = 1 - (matter_density + radiation_density + dark_energy)
a__ = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
a__ = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
a : Any = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 273
| 0
|
UpperCAmelCase_ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 519
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BlenderbotSmallConfig
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 'gelu'
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , ):
"""simple docstring"""
_snake_case : Any = parent
_snake_case : str = batch_size
_snake_case : Optional[Any] = seq_length
_snake_case : Union[str, Any] = is_training
_snake_case : int = use_labels
_snake_case : int = vocab_size
_snake_case : Tuple = hidden_size
_snake_case : Any = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : List[str] = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : str = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : Optional[Any] = eos_token_id
_snake_case : Dict = pad_token_id
_snake_case : Dict = bos_token_id
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_snake_case : Dict = prepare_blenderbot_small_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : Dict = TFBlenderbotSmallModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder()
_snake_case : Any = inputs_dict["""input_ids"""]
_snake_case : Optional[int] = input_ids[:1, :]
_snake_case : Any = inputs_dict["""attention_mask"""][:1, :]
_snake_case : Dict = inputs_dict["""head_mask"""]
_snake_case : Dict = 1
# first forward pass
_snake_case : str = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
_snake_case , _snake_case : Tuple = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : Dict = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
_snake_case : Dict = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1e-3 )
def UpperCAmelCase ( A__ , A__ , A__ , A__=None , A__=None , A__=None , A__=None , A__=None , ) -> Union[str, Any]:
if attention_mask is None:
_snake_case : str = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case : Optional[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_snake_case : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : int = TFBlenderbotSmallModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE__ )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
SCREAMING_SNAKE_CASE_ = 'facebook/blenderbot_small-90M'
@cached_property
def __lowerCamelCase( self ):
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Tuple = self.tokenizer(self.src_text , return_tensors="""tf""" )
_snake_case : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=SCREAMING_SNAKE_CASE__ , )
_snake_case : Optional[int] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 519
| 1
|
from __future__ import annotations
def a__ ( snake_case__ : list[float] , snake_case__ : str ):
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(snake_case__ ):
print(f'''{i}\t\t{d}''' )
def a__ ( snake_case__ : list[dict[str, int]] , snake_case__ : list[float] , snake_case__ : int ):
for j in range(snake_case__ ):
_UpperCAmelCase,_UpperCAmelCase,_UpperCAmelCase : Union[str, Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def a__ ( snake_case__ : list[dict[str, int]] , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
_UpperCAmelCase : List[str] = [float("""inf""" )] * vertex_count
_UpperCAmelCase : Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(snake_case__ ):
_UpperCAmelCase,_UpperCAmelCase,_UpperCAmelCase : List[Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
_UpperCAmelCase : Optional[int] = distance[u] + w
_UpperCAmelCase : Optional[Any] = check_negative_cycle(snake_case__ , snake_case__ , snake_case__ )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Dict = int(input('Enter number of vertices: ').strip())
SCREAMING_SNAKE_CASE__ : List[Any] = int(input('Enter number of edges: ').strip())
SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight}
SCREAMING_SNAKE_CASE__ : List[str] = int(input('\nEnter shortest path source:').strip())
SCREAMING_SNAKE_CASE__ : int = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 643
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Optional[int] = '▁'
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( A , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = BertGenerationTokenizer
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
def __snake_case( self ):
super().setUp()
_UpperCAmelCase : Optional[int] = BertGenerationTokenizer(A_ , keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case( self ):
_UpperCAmelCase : Optional[int] = """<s>"""
_UpperCAmelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __snake_case( self ):
_UpperCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(A_ ) , 10_02 )
def __snake_case( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __snake_case( self ):
_UpperCAmelCase : Union[str, Any] = BertGenerationTokenizer(A_ , keep_accents=A_ )
_UpperCAmelCase : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [2_85, 46, 10, 1_70, 3_82] , )
_UpperCAmelCase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __snake_case( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def __snake_case( self ):
_UpperCAmelCase : Optional[Any] = """Hello World!"""
_UpperCAmelCase : str = [1_85_36, 22_60, 1_01]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def __snake_case( self ):
_UpperCAmelCase : List[str] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_UpperCAmelCase : List[Any] = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@require_torch
@slow
def __snake_case( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCAmelCase : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCAmelCase : Tuple = """ """.join(A_ )
_UpperCAmelCase : List[Any] = self.big_tokenizer.encode_plus(A_ , return_tensors="""pt""" , return_token_type_ids=A_ )
_UpperCAmelCase : Any = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=A_ )
_UpperCAmelCase : int = BertGenerationConfig()
_UpperCAmelCase : Dict = BertGenerationEncoder(A_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**A_ )
model(**A_ )
@slow
def __snake_case( self ):
# fmt: off
_UpperCAmelCase : Optional[int] = {"""input_ids""": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 643
| 1
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Tuple = '''align_text_model'''
def __init__(self , UpperCAmelCase=3_0_5_2_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0 , UpperCAmelCase="absolute" , UpperCAmelCase=True , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase)
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =hidden_act
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =position_embedding_type
__UpperCAmelCase =use_cache
__UpperCAmelCase =pad_token_id
@classmethod
def A__ (cls , UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase =cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase)
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''') == "align":
__UpperCAmelCase =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase)
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Dict = '''align_vision_model'''
def __init__(self , UpperCAmelCase = 3 , UpperCAmelCase = 6_0_0 , UpperCAmelCase = 2.0 , UpperCAmelCase = 3.1 , UpperCAmelCase = 8 , UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , UpperCAmelCase = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , UpperCAmelCase = [] , UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase = 0.25 , UpperCAmelCase = "swish" , UpperCAmelCase = 2_5_6_0 , UpperCAmelCase = "mean" , UpperCAmelCase = 0.02 , UpperCAmelCase = 0.001 , UpperCAmelCase = 0.99 , UpperCAmelCase = 0.2 , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase)
__UpperCAmelCase =num_channels
__UpperCAmelCase =image_size
__UpperCAmelCase =width_coefficient
__UpperCAmelCase =depth_coefficient
__UpperCAmelCase =depth_divisor
__UpperCAmelCase =kernel_sizes
__UpperCAmelCase =in_channels
__UpperCAmelCase =out_channels
__UpperCAmelCase =depthwise_padding
__UpperCAmelCase =strides
__UpperCAmelCase =num_block_repeats
__UpperCAmelCase =expand_ratios
__UpperCAmelCase =squeeze_expansion_ratio
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dim
__UpperCAmelCase =pooling_type
__UpperCAmelCase =initializer_range
__UpperCAmelCase =batch_norm_eps
__UpperCAmelCase =batch_norm_momentum
__UpperCAmelCase =drop_connect_rate
__UpperCAmelCase =sum(UpperCAmelCase) * 4
@classmethod
def A__ (cls , UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase =cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase)
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''') == "align":
__UpperCAmelCase =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase)
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : int = '''align'''
a_ : Optional[Any] = True
def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=6_4_0 , UpperCAmelCase=1.0 , UpperCAmelCase=0.02 , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase)
if text_config is None:
__UpperCAmelCase ={}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''')
if vision_config is None:
__UpperCAmelCase ={}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''')
__UpperCAmelCase =AlignTextConfig(**UpperCAmelCase)
__UpperCAmelCase =AlignVisionConfig(**UpperCAmelCase)
__UpperCAmelCase =projection_dim
__UpperCAmelCase =temperature_init_value
__UpperCAmelCase =initializer_range
@classmethod
def A__ (cls , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase)
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =copy.deepcopy(self.__dict__)
__UpperCAmelCase =self.text_config.to_dict()
__UpperCAmelCase =self.vision_config.to_dict()
__UpperCAmelCase =self.__class__.model_type
return output
| 702
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__=False ) -> Optional[Any]:
__UpperCAmelCase =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__UpperCAmelCase =[(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
__UpperCAmelCase =''''''
else:
__UpperCAmelCase ='''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__UpperCAmelCase =state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase =state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase =in_proj_weight[
: config.hidden_size, :
]
__UpperCAmelCase =in_proj_bias[: config.hidden_size]
__UpperCAmelCase =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__UpperCAmelCase =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__UpperCAmelCase =in_proj_weight[
-config.hidden_size :, :
]
__UpperCAmelCase =in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Tuple:
__UpperCAmelCase =['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Any:
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
__UpperCAmelCase =[
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
__UpperCAmelCase =dct.pop(snake_case__ )
__UpperCAmelCase =val
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ ) -> Optional[Any]:
__UpperCAmelCase =ViTMSNConfig()
__UpperCAmelCase =1000
__UpperCAmelCase ='''datasets/huggingface/label-files'''
__UpperCAmelCase ='''imagenet-1k-id2label.json'''
__UpperCAmelCase =json.load(open(hf_hub_download(snake_case__ , snake_case__ ) , '''r''' ) )
__UpperCAmelCase ={int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase =idalabel
__UpperCAmelCase ={v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__UpperCAmelCase =384
__UpperCAmelCase =1536
__UpperCAmelCase =6
elif "l16" in checkpoint_url:
__UpperCAmelCase =1024
__UpperCAmelCase =4096
__UpperCAmelCase =24
__UpperCAmelCase =16
__UpperCAmelCase =0.1
elif "b4" in checkpoint_url:
__UpperCAmelCase =4
elif "l7" in checkpoint_url:
__UpperCAmelCase =7
__UpperCAmelCase =1024
__UpperCAmelCase =4096
__UpperCAmelCase =24
__UpperCAmelCase =16
__UpperCAmelCase =0.1
__UpperCAmelCase =ViTMSNModel(snake_case__ )
__UpperCAmelCase =torch.hub.load_state_dict_from_url(snake_case__ , map_location='''cpu''' )['''target_encoder''']
__UpperCAmelCase =ViTImageProcessor(size=config.image_size )
remove_projection_head(snake_case__ )
__UpperCAmelCase =create_rename_keys(snake_case__ , base_model=snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , base_model=snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
__UpperCAmelCase ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase =Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
__UpperCAmelCase =ViTImageProcessor(
size=config.image_size , image_mean=snake_case__ , image_std=snake_case__ )
__UpperCAmelCase =image_processor(images=snake_case__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
__UpperCAmelCase =model(**snake_case__ )
__UpperCAmelCase =outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__UpperCAmelCase =torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
__UpperCAmelCase =torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
__UpperCAmelCase =torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
__UpperCAmelCase =torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
__UpperCAmelCase =torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case__ , atol=1e-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 142
| 0
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__magic_name__ : List[Any] = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
__magic_name__ : List[Any] = F"https://www.google.com/search?q={query}&num=100"
__magic_name__ : Union[str, Any] = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
__magic_name__ : Optional[Any] = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
__magic_name__ : List[Any] = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 615
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__magic_name__ : Any = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
__magic_name__ : List[Any] = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
__magic_name__ : Union[str, Any] = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
__magic_name__ : Union[str, Any] = sorted(arg_to_scheduler.keys())
__magic_name__ : Tuple = """{""" + """, """.join(arg_to_scheduler_choices) + """}"""
class SCREAMING_SNAKE_CASE__ (pl.LightningModule ):
def __init__( self : Optional[int] , __lowerCamelCase : argparse.Namespace , __lowerCamelCase : Any=None , __lowerCamelCase : Dict="base" , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=None , **__lowerCamelCase : int , ):
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__lowerCamelCase )
lowerCAmelCase__ = 0
lowerCAmelCase__ = Path(self.hparams.output_dir )
lowerCAmelCase__ = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
lowerCAmelCase__ = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=__lowerCamelCase , **__lowerCamelCase , )
else:
lowerCAmelCase__ = config
lowerCAmelCase__ = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , __lowerCamelCase , __lowerCamelCase ):
assert hasattr(self.config , __lowerCamelCase ), F"""model config doesn't have a `{p}` attribute"""
setattr(self.config , __lowerCamelCase , getattr(self.hparams , __lowerCamelCase ) )
if tokenizer is None:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__lowerCamelCase , )
else:
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = MODEL_MODES[mode]
if model is None:
lowerCAmelCase__ = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__lowerCamelCase , )
else:
lowerCAmelCase__ = model
def A__ ( self : Tuple , *__lowerCamelCase : List[str] , **__lowerCamelCase : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_type.from_pretrained(*__lowerCamelCase , **__lowerCamelCase )
def A__ ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = arg_to_scheduler[self.hparams.lr_scheduler]
lowerCAmelCase__ = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
lowerCAmelCase__ = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def A__ ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model
lowerCAmelCase__ = ['''bias''', '''LayerNorm.weight''']
lowerCAmelCase__ = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
lowerCAmelCase__ = Adafactor(
__lowerCamelCase , lr=self.hparams.learning_rate , scale_parameter=__lowerCamelCase , relative_step=__lowerCamelCase )
else:
lowerCAmelCase__ = AdamW(
__lowerCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
lowerCAmelCase__ = optimizer
lowerCAmelCase__ = self.get_lr_scheduler()
return [optimizer], [scheduler]
def A__ ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return self.validation_step(__lowerCamelCase , __lowerCamelCase )
def A__ ( self : Dict , __lowerCamelCase : Dict ):
"""simple docstring"""
return self.validation_end(__lowerCamelCase )
def A__ ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
lowerCAmelCase__ = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def A__ ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
if stage == "test":
lowerCAmelCase__ = len(self.test_dataloader().dataset )
else:
lowerCAmelCase__ = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=__lowerCamelCase )
lowerCAmelCase__ = len(self.train_dataloader().dataset )
def A__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : bool = False ):
"""simple docstring"""
raise NotImplementedError('''You must implement this for your task''' )
def A__ ( self : Tuple ):
"""simple docstring"""
return self.train_loader
def A__ ( self : List[Any] ):
"""simple docstring"""
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=__lowerCamelCase )
def A__ ( self : str ):
"""simple docstring"""
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=__lowerCamelCase )
def A__ ( self : Dict , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
__lowerCamelCase , list(filter(__lowerCamelCase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def A__ ( self : List[Any] , __lowerCamelCase : Dict[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.output_dir.joinpath('''best_tfmr''' )
lowerCAmelCase__ = self.step_count
self.model.save_pretrained(__lowerCamelCase )
self.tokenizer.save_pretrained(__lowerCamelCase )
@staticmethod
def A__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
parser.add_argument(
'''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(__lowerCamelCase ).parent / '''test_run''' / '''cache''' ) , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=__lowerCamelCase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=__lowerCamelCase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=__lowerCamelCase , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=__lowerCamelCase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=__lowerCamelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=__lowerCamelCase , metavar=__lowerCamelCase , type=__lowerCamelCase , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__lowerCamelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__lowerCamelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=__lowerCamelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=__lowerCamelCase , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=__lowerCamelCase )
parser.add_argument('''--train_batch_size''' , default=32 , type=__lowerCamelCase )
parser.add_argument('''--eval_batch_size''' , default=32 , type=__lowerCamelCase )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class SCREAMING_SNAKE_CASE__ (pl.Callback ):
def A__ ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ):
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class SCREAMING_SNAKE_CASE__ (pl.Callback ):
def A__ ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__lowerCamelCase )
class SCREAMING_SNAKE_CASE__ (pl.Callback ):
def A__ ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = trainer.lr_schedulers[0]['''scheduler''']
lowerCAmelCase__ = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__lowerCamelCase )
def A__ ( self : Dict , __lowerCamelCase : pl.Trainer , __lowerCamelCase : pl.LightningModule ):
"""simple docstring"""
rank_zero_info('''***** Validation results *****''' )
lowerCAmelCase__ = trainer.callback_metrics
# Log results
for key in sorted(__lowerCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__lowerCamelCase , str(metrics[key] ) ) )
def A__ ( self : str , __lowerCamelCase : pl.Trainer , __lowerCamelCase : pl.LightningModule ):
"""simple docstring"""
rank_zero_info('''***** Test results *****''' )
lowerCAmelCase__ = trainer.callback_metrics
# Log and save results to file
lowerCAmelCase__ = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(__lowerCamelCase , '''w''' ) as writer:
for key in sorted(__lowerCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__lowerCamelCase , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(__lowerCamelCase , str(metrics[key] ) ) )
def a_ ( __lowerCAmelCase , __lowerCAmelCase ):
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'''--output_dir''' , default=str(Path(__lowerCAmelCase ).parent / '''test_run''' / '''model_checkpoints''' ) , type=__lowerCAmelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__lowerCAmelCase , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=__lowerCAmelCase )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=__lowerCAmelCase , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=__lowerCAmelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=__lowerCAmelCase , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(__lowerCAmelCase ).parent / '''test_run''' / '''dummy-train-data''' ) , type=__lowerCAmelCase , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[] , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
pl.seed_everything(args.seed )
# init model
lowerCAmelCase__ = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
lowerCAmelCase__ = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__lowerCAmelCase )
if logging_callback is None:
lowerCAmelCase__ = LoggingCallback()
lowerCAmelCase__ = {}
if args.fpaa:
lowerCAmelCase__ = 16
if args.gpus > 1:
lowerCAmelCase__ = '''auto'''
lowerCAmelCase__ = '''ddp'''
lowerCAmelCase__ = args.accumulate_grad_batches
lowerCAmelCase__ = None
lowerCAmelCase__ = '''auto'''
lowerCAmelCase__ = pl.Trainer.from_argparse_args(
__lowerCAmelCase , weights_summary=__lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **__lowerCAmelCase , )
if args.do_train:
trainer.fit(__lowerCAmelCase )
else:
print('''RAG modeling tests with new set functions successfuly executed!''' )
return trainer
| 615
| 1
|
'''simple docstring'''
import math
def lowercase__ ( __UpperCamelCase : int ):
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def lowercase__ ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=1 , **__UpperCamelCase : Any ):
'''simple docstring'''
__lowercase = factor * value
__lowercase = value
while not is_prime(__UpperCamelCase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **__UpperCamelCase )
return value
| 339
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def lowercase__ ( __UpperCamelCase : Callable[[int | float], int | float] , __UpperCamelCase : int | float , __UpperCamelCase : int | float , __UpperCamelCase : int = 100 , ):
'''simple docstring'''
__lowercase = x_start
__lowercase = fnc(__UpperCamelCase )
__lowercase = 0.0
for _ in range(__UpperCamelCase ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__lowercase = (x_end - x_start) / steps + xa
__lowercase = fnc(__UpperCamelCase )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase = xa
__lowercase = fxa
return area
if __name__ == "__main__":
def lowercase__ ( __UpperCamelCase : Any ):
'''simple docstring'''
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
snake_case : List[str] = 10
while i <= 100_000:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
| 339
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : str = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
A_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 456
|
def UpperCamelCase (lowercase_: str ) -> Optional[Any]:
A__ : Optional[int] = []
A__ : Optional[int] = set({"""(""", """[""", """{"""} )
A__ : str = set({""")""", """]""", """}"""} )
A__ : Optional[Any] = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(lowercase_ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowercase_ ) == 0 or (len(lowercase_ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowercase_ ) == 0
def UpperCamelCase () -> List[str]:
A__ : Optional[Any] = input("""Enter sequence of brackets: """ )
if is_balanced(lowercase_ ):
print(lowercase_ , """is balanced""" )
else:
print(lowercase_ , """is not balanced""" )
if __name__ == "__main__":
main()
| 456
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = 8
# DPR tok
UpperCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCAmelCase__ = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(__lowercase , exist_ok=__lowercase )
UpperCAmelCase__ = os.path.join(__lowercase , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCAmelCase__ = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
UpperCAmelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCAmelCase__ = {"""unk_token""": """<unk>"""}
UpperCAmelCase__ = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(__lowercase , exist_ok=__lowercase )
UpperCAmelCase__ = os.path.join(__lowercase , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ = os.path.join(__lowercase , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowercase ) )
def A__ ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def A__ ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def A__ ( self ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def A__ ( self ):
UpperCAmelCase__ = os.path.join(self.tmpdirname , """rag_tokenizer""" )
UpperCAmelCase__ = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
UpperCAmelCase__ = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__lowercase )
rag_tokenizer.save_pretrained(__lowercase )
UpperCAmelCase__ = RagTokenizer.from_pretrained(__lowercase , config=__lowercase )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __lowercase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __lowercase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def A__ ( self ):
UpperCAmelCase__ = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
UpperCAmelCase__ = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
UpperCAmelCase__ = tokenizer(__lowercase )
self.assertIsNotNone(__lowercase )
@slow
def A__ ( self ):
UpperCAmelCase__ = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
UpperCAmelCase__ = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
UpperCAmelCase__ = tokenizer(__lowercase )
self.assertIsNotNone(__lowercase )
| 422
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
a : Optional[int] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def snake_case__ ( ) ->Dict:
UpperCAmelCase__ = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCAmelCase__ = g.get_repo("""huggingface/transformers""" )
UpperCAmelCase__ = repo.get_issues(state="""open""" )
for issue in open_issues:
UpperCAmelCase__ = sorted([comment for comment in issue.get_comments()] , key=lambda _SCREAMING_SNAKE_CASE : i.created_at , reverse=_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = comments[0] if len(_SCREAMING_SNAKE_CASE ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 422
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 473
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :Optional[Any] ) -> Union[str, Any]:
inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ : Dict = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :List[str] ) -> int:
inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :int ) -> int:
a_ : List[str] = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :int ) -> Optional[Any]:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :List[str] ) -> Optional[int]:
a_ : Optional[Any] = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :int , _SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Tuple:
a_ : Dict = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
a_ : Dict = expected_configs[0]
assert expected_config in infos
a_ : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :str ) -> List[Any]:
a_ : Tuple = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert expected_config in infos
a_ : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :Tuple ) -> Any:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
| 473
| 1
|
import re
from filelock import FileLock
try:
import nltk
lowercase_ = True
except (ImportError, ModuleNotFoundError):
lowercase_ = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def a__ ( snake_case ):
"""simple docstring"""
re.sub('''<n>''' , '''''' , snake_case ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(snake_case ) )
| 131
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''efficientformer'''
def __init__( self : Dict , _A : List[int] = [3, 2, 6, 4] , _A : List[int] = [48, 96, 224, 448] , _A : List[bool] = [True, True, True, True] , _A : int = 448 , _A : int = 32 , _A : int = 4 , _A : int = 7 , _A : int = 5 , _A : int = 8 , _A : int = 4 , _A : float = 0.0 , _A : int = 16 , _A : int = 3 , _A : int = 3 , _A : int = 3 , _A : int = 2 , _A : int = 1 , _A : float = 0.0 , _A : int = 1 , _A : bool = True , _A : bool = True , _A : float = 1e-5 , _A : str = "gelu" , _A : float = 0.02 , _A : float = 1e-12 , _A : int = 224 , _A : float = 1e-05 , **_A : Tuple , ):
"""simple docstring"""
super().__init__(**_A )
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = hidden_sizes
__SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : Any = layer_norm_eps
__SCREAMING_SNAKE_CASE : Dict = patch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
__SCREAMING_SNAKE_CASE : List[Any] = depths
__SCREAMING_SNAKE_CASE : Optional[Any] = mlp_expansion_ratio
__SCREAMING_SNAKE_CASE : List[str] = downsamples
__SCREAMING_SNAKE_CASE : str = dim
__SCREAMING_SNAKE_CASE : Any = key_dim
__SCREAMING_SNAKE_CASE : Tuple = attention_ratio
__SCREAMING_SNAKE_CASE : Dict = resolution
__SCREAMING_SNAKE_CASE : Dict = pool_size
__SCREAMING_SNAKE_CASE : List[str] = downsample_patch_size
__SCREAMING_SNAKE_CASE : int = downsample_stride
__SCREAMING_SNAKE_CASE : Optional[Any] = downsample_pad
__SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = num_metaad_blocks
__SCREAMING_SNAKE_CASE : int = distillation
__SCREAMING_SNAKE_CASE : List[Any] = use_layer_scale
__SCREAMING_SNAKE_CASE : Optional[int] = layer_scale_init_value
__SCREAMING_SNAKE_CASE : Dict = image_size
__SCREAMING_SNAKE_CASE : List[str] = batch_norm_eps
| 131
| 1
|
"""simple docstring"""
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = [], []
while len(UpperCAmelCase_ ) > 1:
_lowerCAmelCase , _lowerCAmelCase = min(UpperCAmelCase_ ), max(UpperCAmelCase_ )
start.append(UpperCAmelCase_ )
end.append(UpperCAmelCase_ )
collection.remove(UpperCAmelCase_ )
collection.remove(UpperCAmelCase_ )
end.reverse()
return start + collection + end
if __name__ == "__main__":
a__ : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
a__ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 589
|
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> int:
"""simple docstring"""
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCamelCase = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCamelCase = _calculate(days - 1 , UpperCAmelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCamelCase = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCamelCase = _calculate(days - 1 , UpperCAmelCase_ , 0 )
UpperCamelCase = state_late + state_absent + state_ontime
UpperCamelCase = prizestrings
return prizestrings
def lowerCamelCase__ ( UpperCAmelCase_ = 30 )-> int:
"""simple docstring"""
return _calculate(UpperCAmelCase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 554
| 0
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE : str = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
SCREAMING_SNAKE_CASE : Tuple = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
SCREAMING_SNAKE_CASE : int = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def __A ( _A , _A ):
"""simple docstring"""
return float((preds == labels).mean() )
def __A ( _A , _A ):
"""simple docstring"""
__a = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__a = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def __A ( _A , _A ):
"""simple docstring"""
__a = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
__a = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def _UpperCAmelCase ( self : Optional[Any] ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def _UpperCAmelCase ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__lowerCamelCase , __lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(__lowerCamelCase , __lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__lowerCamelCase , __lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__lowerCamelCase , __lowerCamelCase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 702
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
SCREAMING_SNAKE_CASE : str = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
SCREAMING_SNAKE_CASE : Dict = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def __A ( _A ):
"""simple docstring"""
__a = list(state_dict.keys() )
for name in state_dict_keys:
__a = state_dict.pop(_A )
# emb -> embedding
if name.startswith("emb." ):
__a = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
__a = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
__a = re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , _A )
# ffn -> feed_forward
__a = re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , _A )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
__a = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
__a = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(".time_mix_r" ):
__a = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
__a = "rwkv." + name
__a = weight
return state_dict
def __A ( _A , _A , _A , _A=None , _A=None , _A=False , _A=None ):
"""simple docstring"""
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
__a = 5_0277
__a = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
__a = PreTrainedTokenizerFast(tokenizer_file=_A )
__a = len(_A )
tokenizer.save_pretrained(_A )
# 2. Build the config
__a = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__a = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
__a = RwkvConfig(
vocab_size=_A , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_A )
# 3. Download model file then convert state_dict
__a = hf_hub_download(_A , _A )
__a = torch.load(_A , map_location="cpu" )
__a = convert_state_dict(_A )
# 4. Split in shards and save
__a , __a = shard_checkpoint(_A )
for shard_file, shard in shards.items():
torch.save(_A , os.path.join(_A , _A ) )
if index is not None:
__a = os.path.join(_A , _A )
# Save the index as well
with open(_A , "w" , encoding="utf-8" ) as f:
__a = json.dumps(_A , indent=2 , sort_keys=_A ) + "\n"
f.write(_A )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
__a = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__a = torch.load(os.path.join(_A , _A ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_A , _A ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
__a = AutoModelForCausalLM.from_pretrained(_A )
model.push_to_hub(_A , max_shard_size="2GB" )
tokenizer.push_to_hub(_A )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 525
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10
|
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def UpperCamelCase ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
lowercase__ : int = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowercase__ : Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" )
lowercase__ : Optional[Any] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" )
lowercase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" )
lowercase__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" )
lowercase__ : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" )
lowercase__ : Optional[int] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" )
lowercase__ : List[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" )
lowercase__ : int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" )
lowercase__ : Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" )
lowercase__ : Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" )
lowercase__ : Any = key.replace("""text_encoder.module""" , """flava.text_model""" )
lowercase__ : Optional[Any] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" )
lowercase__ : Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" )
lowercase__ : Any = key.replace("""text_projection""" , """flava.text_projection""" )
lowercase__ : List[Any] = key.replace("""image_projection""" , """flava.image_projection""" )
lowercase__ : str = value.float()
for key, value in codebook_state_dict.items():
lowercase__ : Any = value
return upgrade
@torch.no_grad()
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
lowercase__ : int = FlavaConfig.from_pretrained(lowercase_ )
else:
lowercase__ : Optional[int] = FlavaConfig()
lowercase__ : List[Any] = FlavaForPreTraining(lowercase_ ).eval()
lowercase__ : Dict = convert_dalle_checkpoint(lowercase_ , lowercase_ , save_checkpoint=lowercase_ )
if os.path.exists(lowercase_ ):
lowercase__ : Dict = torch.load(lowercase_ , map_location="""cpu""" )
else:
lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" )
lowercase__ : int = upgrade_state_dict(lowercase_ , lowercase_ )
hf_model.load_state_dict(lowercase_ )
lowercase__ : Optional[int] = hf_model.state_dict()
lowercase__ : Optional[int] = count_parameters(lowercase_ )
lowercase__ : Any = count_parameters(lowercase_ ) + count_parameters(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ : List[str] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 12
| 0
|
from __future__ import annotations
from typing import Any
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> None:
create_state_space_tree(SCREAMING_SNAKE_CASE , [] , 0 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
if index == len(SCREAMING_SNAKE_CASE ):
print(SCREAMING_SNAKE_CASE )
return
create_state_space_tree(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
lowerCAmelCase__: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 712
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__: Optional[int] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__: Dict = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowerCAmelCase__: Any = spec.loader.load_module()
lowerCAmelCase__: Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__: Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
lowerCAmelCase__: Dict = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def __SCREAMING_SNAKE_CASE ( ) -> int:
SCREAMING_SNAKE_CASE_ : List[str] = []
for config_class in list(CONFIG_MAPPING.values() ):
SCREAMING_SNAKE_CASE_ : Any = False
# source code of `config_class`
SCREAMING_SNAKE_CASE_ : Optional[Any] = inspect.getsource(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = _re_checkpoint.findall(SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
SCREAMING_SNAKE_CASE_ : Optional[Any] = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
SCREAMING_SNAKE_CASE_ : Tuple = True
break
SCREAMING_SNAKE_CASE_ : Any = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE_ : str = '\n'.join(sorted(SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 311
| 0
|
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
UpperCAmelCase_ : Tuple = True
from torch.cuda.amp import autocast
UpperCAmelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class a :
'''simple docstring'''
__lowerCAmelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__lowerCAmelCase : Optional[bool] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__lowerCAmelCase : Optional[bool] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to log verbose messages or not."""} , )
__lowerCAmelCase : Optional[float] = field(
default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""} )
__lowerCAmelCase : Optional[float] = field(
default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""} )
__lowerCAmelCase : Optional[float] = field(
default=0.9_9_9_9_9_5 , metadata={"""help""": """Decay of gumbel temperature during training."""} )
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_a : List[Any] = logging.WARNING
if model_args.verbose_logging:
_a : List[str] = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_a : Optional[Any] = logging.INFO
logger.setLevel(_lowerCAmelCase )
@dataclass
class a :
'''simple docstring'''
__lowerCAmelCase : str = field(
default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
__lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__lowerCAmelCase : Optional[str] = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__lowerCAmelCase : Optional[str] = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
__lowerCAmelCase : Optional[str] = field(
default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""} , )
__lowerCAmelCase : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
__lowerCAmelCase : Optional[int] = field(
default=1 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
__lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__lowerCAmelCase : Optional[float] = field(
default=2_0.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} )
@dataclass
class a :
'''simple docstring'''
__lowerCAmelCase : WavaVecaForPreTraining
__lowerCAmelCase : WavaVecaFeatureExtractor
__lowerCAmelCase : Union[bool, str] = "longest"
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[int] = None
def __call__( self , lowerCamelCase_ ) -> Any:
# reformat list to dict and set to pytorch format
_a : Tuple = self.feature_extractor.pad(
__A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
_a : Optional[Any] = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
_a : List[Any] = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_a : int = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
_a : List[Any] = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
_a : List[str] = 1
_a : int = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
_a : Union[str, Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__A , min_masks=2 , )
return batch
class a ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase_ , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=1.0 , **lowerCamelCase_ ) -> Any:
super().__init__(*__A , **__A )
_a : int = 0
_a : str = max_gumbel_temp
_a : str = min_gumbel_temp
_a : Any = gumbel_temp_decay
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
model.train()
_a : str = self._prepare_inputs(__A )
if self.use_amp:
with autocast():
_a : Optional[int] = self.compute_loss(__A , __A )
else:
_a : Tuple = self.compute_loss(__A , __A )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_a : Optional[int] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_a : Union[str, Any] = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
_a : str = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__A ).backward()
elif self.use_apex:
with amp.scale_loss(__A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__A )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def UpperCAmelCase_ ( ):
'''simple docstring'''
_a : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_a , _a , _a : Optional[Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCAmelCase , _lowerCAmelCase )
# Downloading and loading a dataset from the hub.
_a : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_a : Optional[int] = DatasetDict()
_a : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
_a : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
_a : List[Any] = DatasetDict()
_a : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
_a : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_a : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCAmelCase )
def prepare_dataset(A ):
# check that all files have the correct sampling rate
_a , _a : str = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_a : Dict = datasets.map(
_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
_a : Tuple = vectorized_datasets.filter(
lambda A : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(A ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_a : Union[str, Any] = vectorized_datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_a : List[Any] = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
_a : str = WavaVecaForPreTraining(_lowerCAmelCase )
_a : List[Any] = DataCollatorForWavaVecaPretraining(model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
_a : List[str] = WavaVecaPreTrainer(
model=_lowerCAmelCase , data_collator=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=_lowerCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 120
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_A: List[Any] = datasets.load_iris()
_A: Union[str, Any] = np.array(data["""data"""])
_A: Union[str, Any] = np.array(data["""target"""])
_A: Dict = data["""target_names"""]
_A , _A , _A , _A: List[str] = train_test_split(X, y)
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> Optional[int]:
return np.linalg.norm(np.array(_lowerCAmelCase ) - np.array(_lowerCAmelCase ) )
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=5 )-> int:
__UpperCAmelCase = zip(_lowerCAmelCase , _lowerCAmelCase )
# List of distances of all points from the point to be classified
__UpperCAmelCase = []
for data_point in data:
__UpperCAmelCase = euclidean_distance(data_point[0] , _lowerCAmelCase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__UpperCAmelCase = [i[1] for i in sorted(_lowerCAmelCase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__UpperCAmelCase = Counter(_lowerCAmelCase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 126
| 0
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCAmelCase_ = DistilBertTokenizer
lowerCAmelCase_ = DistilBertTokenizerFast
lowerCAmelCase_ = True
@slow
def _snake_case ( self : Any ) -> Tuple:
_lowerCamelCase = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
_lowerCamelCase = tokenizer.encode('sequence builders' , add_special_tokens=snake_case__ )
_lowerCamelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=snake_case__ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(snake_case__ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 234
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCamelCase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int]=10_24 ) -> int:
_lowerCamelCase , _lowerCamelCase = [], []
_lowerCamelCase = list(zip(UpperCamelCase , UpperCamelCase ) )
_lowerCamelCase , _lowerCamelCase = sorted_examples[0]
def is_too_big(UpperCamelCase : Union[str, Any] ):
return tok(UpperCamelCase , return_tensors='pt' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
_lowerCamelCase = new_src + ' ' + src
_lowerCamelCase = new_tgt + ' ' + tgt
if is_too_big(UpperCamelCase ) or is_too_big(UpperCamelCase ): # cant fit, finalize example
finished_src.append(UpperCamelCase )
finished_tgt.append(UpperCamelCase )
_lowerCamelCase , _lowerCamelCase = src, tgt
else: # can fit, keep adding
_lowerCamelCase , _lowerCamelCase = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(UpperCamelCase )
finished_tgt.append(UpperCamelCase )
return finished_src, finished_tgt
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : Path , UpperCamelCase : int , UpperCamelCase : Optional[Any] ) -> List[Any]:
_lowerCamelCase = Path(UpperCamelCase )
save_path.mkdir(exist_ok=UpperCamelCase )
for split in ["train"]:
_lowerCamelCase , _lowerCamelCase = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
_lowerCamelCase = [x.rstrip() for x in Path(UpperCamelCase ).open().readlines()]
_lowerCamelCase = [x.rstrip() for x in Path(UpperCamelCase ).open().readlines()]
_lowerCamelCase , _lowerCamelCase = pack_examples(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
print(F"""packed {split} split from {len(UpperCamelCase )} examples -> {len(UpperCamelCase )}.""" )
Path(save_path / F"""{split}.source""" ).open('w' ).write('\n'.join(UpperCamelCase ) )
Path(save_path / F"""{split}.target""" ).open('w' ).write('\n'.join(UpperCamelCase ) )
for split in ["val", "test"]:
_lowerCamelCase , _lowerCamelCase = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(UpperCamelCase , save_path / F"""{split}.source""" )
shutil.copyfile(UpperCamelCase , save_path / F"""{split}.target""" )
def lowerCamelCase ( ) -> int:
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--tok_name' , type=UpperCamelCase , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('--max_seq_len' , type=UpperCamelCase , default=1_28 )
parser.add_argument('--data_dir' , type=UpperCamelCase )
parser.add_argument('--save_path' , type=UpperCamelCase )
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(UpperCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 234
| 1
|
'''simple docstring'''
def UpperCamelCase_ ( A__ : int , A__ : int ):
'''simple docstring'''
return 1 if input_a == input_a else 0
def UpperCamelCase_ ( ):
'''simple docstring'''
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 275
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Union[str, Any]=7 , lowerCamelCase : Any=3 , lowerCamelCase : Tuple=18 , lowerCamelCase : List[Any]=30 , lowerCamelCase : str=4_00 , lowerCamelCase : List[str]=True , lowerCamelCase : List[Any]=None , lowerCamelCase : Any=True , ) -> Any:
lowerCAmelCase_ : Optional[int] = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase_ : Dict = parent
lowerCAmelCase_ : Tuple = batch_size
lowerCAmelCase_ : int = num_channels
lowerCAmelCase_ : List[str] = image_size
lowerCAmelCase_ : Optional[int] = min_resolution
lowerCAmelCase_ : List[str] = max_resolution
lowerCAmelCase_ : List[str] = do_resize
lowerCAmelCase_ : str = size
lowerCAmelCase_ : int = apply_ocr
def __lowercase ( self : Optional[Any] ) -> List[Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __snake_case ( _SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __lowercase ( self : List[Any] ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self )
@property
def __lowercase ( self : Optional[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Optional[Any] ) -> Dict:
lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """apply_ocr""" ) )
def __lowercase ( self : List[str] ) -> int:
lowerCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowerCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __lowercase ( self : str ) -> Dict:
pass
def __lowercase ( self : str ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
lowerCAmelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , lowerCamelCase )
self.assertIsInstance(encoding.boxes , lowerCamelCase )
# Test batched
lowerCAmelCase_ : Union[str, Any] = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : Any ) -> Any:
# Initialize image_processing
lowerCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase_ : Optional[int] = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : List[str] ) -> int:
# Initialize image_processing
lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : int ) -> Optional[Any]:
# with apply_OCR = True
lowerCAmelCase_ : Optional[int] = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCAmelCase_ : int = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
lowerCAmelCase_ : Any = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
lowerCAmelCase_ : Dict = image_processing(lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase_ : Union[str, Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
lowerCAmelCase_ : Any = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCamelCase )
self.assertListEqual(encoding.boxes , lowerCamelCase )
# with apply_OCR = False
lowerCAmelCase_ : Dict = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase )
lowerCAmelCase_ : Dict = image_processing(lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 275
| 1
|
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''align_text_model'''
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase="absolute" , _lowerCamelCase=True , **_lowerCamelCase , ) -> Optional[int]:
super().__init__(**_lowerCamelCase )
A_ : Any = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[str] = hidden_act
A_ : Union[str, Any] = intermediate_size
A_ : Any = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : List[Any] = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : int = initializer_range
A_ : List[Any] = layer_norm_eps
A_ : Any = position_embedding_type
A_ : Any = use_cache
A_ : List[Any] = pad_token_id
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowerCamelCase )
A_ , A_ : List[Any] = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''align_vision_model'''
def __init__( self , _lowerCamelCase = 3 , _lowerCamelCase = 600 , _lowerCamelCase = 2.0 , _lowerCamelCase = 3.1 , _lowerCamelCase = 8 , _lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , _lowerCamelCase = [32, 16, 24, 40, 80, 112, 192] , _lowerCamelCase = [16, 24, 40, 80, 112, 192, 320] , _lowerCamelCase = [] , _lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , _lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , _lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , _lowerCamelCase = 0.25 , _lowerCamelCase = "swish" , _lowerCamelCase = 2560 , _lowerCamelCase = "mean" , _lowerCamelCase = 0.02 , _lowerCamelCase = 0.001 , _lowerCamelCase = 0.99 , _lowerCamelCase = 0.2 , **_lowerCamelCase , ) -> Tuple:
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = num_channels
A_ : Union[str, Any] = image_size
A_ : List[Any] = width_coefficient
A_ : int = depth_coefficient
A_ : Union[str, Any] = depth_divisor
A_ : Optional[int] = kernel_sizes
A_ : Union[str, Any] = in_channels
A_ : int = out_channels
A_ : List[str] = depthwise_padding
A_ : int = strides
A_ : Union[str, Any] = num_block_repeats
A_ : Union[str, Any] = expand_ratios
A_ : Tuple = squeeze_expansion_ratio
A_ : Dict = hidden_act
A_ : Optional[int] = hidden_dim
A_ : Dict = pooling_type
A_ : Optional[int] = initializer_range
A_ : str = batch_norm_eps
A_ : str = batch_norm_momentum
A_ : Tuple = drop_connect_rate
A_ : Any = sum(_lowerCamelCase ) * 4
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowerCamelCase )
A_ , A_ : int = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
A_ : Dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''align'''
lowerCamelCase = True
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=640 , _lowerCamelCase=1.0 , _lowerCamelCase=0.02 , **_lowerCamelCase , ) -> str:
super().__init__(**_lowerCamelCase )
if text_config is None:
A_ : str = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
A_ : Dict = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
A_ : Any = AlignTextConfig(**_lowerCamelCase )
A_ : List[Any] = AlignVisionConfig(**_lowerCamelCase )
A_ : Optional[int] = projection_dim
A_ : Union[str, Any] = temperature_init_value
A_ : List[Any] = initializer_range
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : str = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Dict = self.vision_config.to_dict()
A_ : Any = self.__class__.model_type
return output
| 385
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : int = create_tensor(a_ )
A_ : Any = gather(a_ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : List[str] = [state.process_index]
A_ : Optional[Any] = gather_object(a_ )
assert len(a_ ) == state.num_processes, F"{gathered_obj}, {len(a_ )} != {state.num_processes}"
assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}"
def UpperCAmelCase ( a_ ) -> List[str]:
"""simple docstring"""
A_ : List[str] = create_tensor(a_ )
A_ : Optional[Any] = broadcast(a_ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
if state.is_main_process:
A_ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
A_ : Any = torch.arange(state.num_processes ).to(state.device )
A_ : Union[str, Any] = pad_across_processes(a_ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
if state.num_processes != 2:
return
A_ : Tuple = create_tensor(a_ )
A_ : Optional[Any] = reduce(a_ , """sum""" )
A_ : str = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(a_ , a_ ), F"{reduced_tensor} != {truth_tensor}"
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
if state.num_processes != 2:
return
A_ : str = create_tensor(a_ )
A_ : int = reduce(a_ , """mean""" )
A_ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(a_ , a_ ), F"{reduced_tensor} != {truth_tensor}"
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
main()
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
A_ : Union[str, Any] = PartialState()
state.print(F"State: {state}" )
state.print("""testing gather""" )
test_gather(a_ )
state.print("""testing gather_object""" )
test_gather_object(a_ )
state.print("""testing broadcast""" )
test_broadcast(a_ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(a_ )
state.print("""testing reduce_sum""" )
test_reduce_sum(a_ )
state.print("""testing reduce_mean""" )
test_reduce_mean(a_ )
if __name__ == "__main__":
main()
| 385
| 1
|
__a = [
(1_0_0_0, 'M'),
(9_0_0, 'CM'),
(5_0_0, 'D'),
(4_0_0, 'CD'),
(1_0_0, 'C'),
(9_0, 'XC'),
(5_0, 'L'),
(4_0, 'XL'),
(1_0, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def a ( snake_case__: str ):
'''simple docstring'''
lowercase_ = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
lowercase_ = 0
lowercase_ = 0
while place < len(snake_case__ ):
if (place + 1 < len(snake_case__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def a ( snake_case__: int ):
'''simple docstring'''
lowercase_ = []
for arabic, roman in ROMAN:
((lowercase_) , (lowercase_)) = divmod(snake_case__ , snake_case__ )
result.append(roman * factor )
if number == 0:
break
return "".join(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
__A : Union[str, Any] = parse_args()
# Import training_script as a module.
__A : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : int = importlib.import_module(a__ )
# Patch sys.argv
__A : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 17
| 0
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.layer[current_layer](__UpperCAmelCase , __UpperCAmelCase , head_mask[current_layer] )
SCREAMING_SNAKE_CASE_ : Optional[int] =layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __A , )
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ):
super().__init__(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =BertEncoderWithPabee(__UpperCAmelCase )
self.init_weights()
SCREAMING_SNAKE_CASE_ : int =0
SCREAMING_SNAKE_CASE_ : List[Any] =0
SCREAMING_SNAKE_CASE_ : Dict =0
SCREAMING_SNAKE_CASE_ : Any =0
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict =threshold
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =patience
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Tuple =0
SCREAMING_SNAKE_CASE_ : List[str] =0
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Any =self.inference_layers_num / self.inference_instances_num
SCREAMING_SNAKE_CASE_ : List[str] =(
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(__UpperCAmelCase )
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] =input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE_ : Tuple =inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE_ : Any =torch.ones(__UpperCAmelCase , device=__UpperCAmelCase )
if token_type_ids is None:
SCREAMING_SNAKE_CASE_ : List[Any] =torch.zeros(__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE_ : torch.Tensor =self.get_extended_attention_mask(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE_ : Dict =encoder_hidden_states.size()
SCREAMING_SNAKE_CASE_ : List[str] =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ : List[str] =torch.ones(__UpperCAmelCase , device=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.invert_attention_mask(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE_ : Dict =self.get_head_mask(__UpperCAmelCase , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE_ : Any =self.embeddings(
input_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =embedding_output
if self.training:
SCREAMING_SNAKE_CASE_ : int =[]
for i in range(self.config.num_hidden_layers ):
SCREAMING_SNAKE_CASE_ : List[Any] =self.encoder.adaptive_forward(
__UpperCAmelCase , current_layer=__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =self.pooler(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =output_layers[i](output_dropout(__UpperCAmelCase ) )
res.append(__UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.encoder(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.pooler(encoder_outputs[0] )
SCREAMING_SNAKE_CASE_ : Optional[int] =[output_layers[self.config.num_hidden_layers - 1](__UpperCAmelCase )]
else:
SCREAMING_SNAKE_CASE_ : List[str] =0
SCREAMING_SNAKE_CASE_ : str =None
SCREAMING_SNAKE_CASE_ : Dict =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
SCREAMING_SNAKE_CASE_ : Dict =self.encoder.adaptive_forward(
__UpperCAmelCase , current_layer=__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] =self.pooler(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =output_layers[i](__UpperCAmelCase )
if regression:
SCREAMING_SNAKE_CASE_ : Optional[int] =logits.detach()
if patient_result is not None:
SCREAMING_SNAKE_CASE_ : Any =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
SCREAMING_SNAKE_CASE_ : Dict =0
else:
SCREAMING_SNAKE_CASE_ : Any =logits.detach().argmax(dim=1 )
if patient_result is not None:
SCREAMING_SNAKE_CASE_ : List[str] =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(__UpperCAmelCase ) ):
patient_counter += 1
else:
SCREAMING_SNAKE_CASE_ : Tuple =0
SCREAMING_SNAKE_CASE_ : Dict =logits
if patient_counter == self.patience:
break
SCREAMING_SNAKE_CASE_ : Optional[int] =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __A , )
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ):
super().__init__(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =config.num_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] =BertModelWithPabee(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE_ : str =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
SCREAMING_SNAKE_CASE_ : str =self.bert(
input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
SCREAMING_SNAKE_CASE_ : Dict =(logits[-1],)
if labels is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =None
SCREAMING_SNAKE_CASE_ : Optional[Any] =0
for ix, logits_item in enumerate(__UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE_ : Any =MSELoss()
SCREAMING_SNAKE_CASE_ : Dict =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ : str =CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ : List[Any] =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
SCREAMING_SNAKE_CASE_ : Optional[int] =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
SCREAMING_SNAKE_CASE_ : Optional[Any] =(total_loss / total_weights,) + outputs
return outputs
| 701
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.dummy_uncond_unet
SCREAMING_SNAKE_CASE_ : Dict =KarrasVeScheduler()
SCREAMING_SNAKE_CASE_ : Tuple =KarrasVePipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int =pipe(num_inference_steps=2 , generator=__UpperCAmelCase , output_type='numpy' ).images
SCREAMING_SNAKE_CASE_ : Any =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int =pipe(num_inference_steps=2 , generator=__UpperCAmelCase , output_type='numpy' , return_dict=__UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : str =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ : Tuple =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] ='google/ncsnpp-celebahq-256'
SCREAMING_SNAKE_CASE_ : List[str] =UNetaDModel.from_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =KarrasVeScheduler()
SCREAMING_SNAKE_CASE_ : Dict =KarrasVePipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] =pipe(num_inference_steps=20 , generator=__UpperCAmelCase , output_type='numpy' ).images
SCREAMING_SNAKE_CASE_ : int =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE_ : List[Any] =np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 153
| 0
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__lowerCamelCase : Any = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__lowerCamelCase : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowerCamelCase_(lowerCamelCase_ ) -> str:
if "://" in dataset_path:
UpperCAmelCase = dataset_path.split("://" )[1]
return dataset_path
def lowerCamelCase_(lowerCamelCase_ ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
UpperCAmelCase = not is_remote_filesystem(lowerCamelCase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCamelCase_ ) , fs._strip_protocol(lowerCamelCase_ ) )
else:
fs.mv(lowerCamelCase_ , lowerCamelCase_ , recursive=lowerCamelCase_ )
def lowerCamelCase_() -> None:
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = threading.Lock()
| 323
|
import os
from datetime import datetime as dt
from github import Github
__lowerCamelCase : Optional[int] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def lowerCamelCase_() -> List[str]:
UpperCAmelCase = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase = g.get_repo("huggingface/diffusers" )
UpperCAmelCase = repo.get_issues(state="open" )
for issue in open_issues:
UpperCAmelCase = sorted(issue.get_comments() , key=lambda lowerCamelCase_ : i.created_at , reverse=lowerCamelCase_ )
UpperCAmelCase = comments[0] if len(lowerCamelCase_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 323
| 1
|
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCamelCase :
'''simple docstring'''
@staticmethod
def __snake_case ( *UpperCAmelCase_ , **UpperCAmelCase_ ):
pass
@is_pipeline_test
@require_vision
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__a : Union[str, Any] =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase = len(UpperCAmelCase_ )
self.assertGreater(UpperCAmelCase_ , 0 )
self.assertEqual(
UpperCAmelCase_ , [
{
'''score''': ANY(UpperCAmelCase_ ),
'''label''': ANY(UpperCAmelCase_ ),
'''box''': {'''xmin''': ANY(UpperCAmelCase_ ), '''ymin''': ANY(UpperCAmelCase_ ), '''xmax''': ANY(UpperCAmelCase_ ), '''ymax''': ANY(UpperCAmelCase_ )},
}
for i in range(UpperCAmelCase_ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __snake_case ( self ):
pass
@require_torch
def __snake_case ( self ):
lowerCAmelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCAmelCase = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
] , )
lowerCAmelCase = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
]
] , )
@require_torch
@slow
def __snake_case ( self ):
lowerCAmelCase = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
] , )
lowerCAmelCase = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __snake_case ( self ):
pass
@require_torch
@slow
def __snake_case ( self ):
lowerCAmelCase = 0.2
lowerCAmelCase = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=UpperCAmelCase_ , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
] , )
@require_torch
@slow
def __snake_case ( self ):
lowerCAmelCase = 2
lowerCAmelCase = pipeline('''zero-shot-object-detection''' )
lowerCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=UpperCAmelCase_ , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
] , )
| 713
|
from collections.abc import Sequence
def UpperCAmelCase ( _snake_case , _snake_case = False ):
if not arr:
return 0
lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' )
lowerCAmelCase = 0.0
for num in arr:
lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowerCAmelCase = max(_snake_case , _snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 33
| 0
|
def UpperCamelCase_( _snake_case : int = 1 , _snake_case : int = 1000 ):
"""simple docstring"""
__a =1
__a =0
for divide_by_number in range(_snake_case , digit + 1 ):
__a =[]
__a =numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(_snake_case ):
__a =len(_snake_case )
__a =divide_by_number
else:
has_been_divided.append(_snake_case )
__a =now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242
|
def UpperCamelCase_( _snake_case : int = 600851475143 ):
"""simple docstring"""
try:
__a =int(_snake_case )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
__a =2
__a =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__a =i
while n % i == 0:
__a =n // i
i += 1
return int(_snake_case )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 242
| 1
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=False ):
"""simple docstring"""
try:
UpperCamelCase__ : Tuple = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase__ : Optional[Any] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase__ : str = strtobool(SCREAMING_SNAKE_CASE )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
__UpperCamelCase : Optional[int] = parse_flag_from_env("RUN_SLOW", default=False)
__UpperCamelCase : Optional[Any] = parse_flag_from_env("RUN_REMOTE", default=False)
__UpperCamelCase : Union[str, Any] = parse_flag_from_env("RUN_LOCAL", default=True)
__UpperCamelCase : Union[str, Any] = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
__UpperCamelCase : Union[str, Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
__UpperCamelCase : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
__UpperCamelCase : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
__UpperCamelCase : Union[str, Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
__UpperCamelCase : int = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
__UpperCamelCase : List[Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
__UpperCamelCase : int = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase__ : int = unittest.skip('''test requires faiss''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase__ : List[Any] = unittest.skip('''test requires regex''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase__ : Tuple = unittest.skip('''test requires elasticsearch''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase__ : str = unittest.skip('''test requires sqlalchemy''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase__ : str = unittest.skip('''test requires PyTorch''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase__ : List[Any] = unittest.skip('''test requires TensorFlow''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase__ : Dict = unittest.skip('''test requires JAX''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase__ : int = unittest.skip('''test requires Pillow''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(SCREAMING_SNAKE_CASE )
else:
return test_case
def _a ( SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(SCREAMING_SNAKE_CASE )
else:
return test_case
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(SCREAMING_SNAKE_CASE )
else:
return test_case
def _a ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
def _require_spacy_model(SCREAMING_SNAKE_CASE : Dict ):
try:
import spacy # noqa F401
spacy.load(SCREAMING_SNAKE_CASE )
except ImportError:
return unittest.skip('''test requires spacy''' )(SCREAMING_SNAKE_CASE )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(SCREAMING_SNAKE_CASE ) )(SCREAMING_SNAKE_CASE )
else:
return test_case
return _require_spacy_model
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(SCREAMING_SNAKE_CASE )
else:
return test_case
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(SCREAMING_SNAKE_CASE )
else:
return test_case
def _a ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase__ : Any = unittest.skip('''test is slow''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase__ : Dict = unittest.skip('''test is local''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase__ : Optional[Any] = unittest.skip('''test is packaged''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase__ : int = unittest.skip('''test requires remote''' )(SCREAMING_SNAKE_CASE )
return test_case
def _a ( *SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
def decorate(cls : Optional[int] ):
for name, fn in cls.__dict__.items():
if callable(SCREAMING_SNAKE_CASE ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase__ : List[Any] = decorator(SCREAMING_SNAKE_CASE )
setattr(cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return cls
return decorate
class __magic_name__ ( __lowerCAmelCase):
pass
class __magic_name__ ( __lowerCAmelCase):
A: Tuple = 0
A: Tuple = 1
A: Tuple = 2
@contextmanager
def _a ( SCREAMING_SNAKE_CASE : List[Any]=OfflineSimulationMode.CONNECTION_FAILS , SCREAMING_SNAKE_CASE : Union[str, Any]=1E-16 ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = requests.Session().request
def timeout_request(SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase__ : Tuple = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
F"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
UpperCamelCase__ : Union[str, Any] = timeout
try:
return online_request(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase__ : List[Any] = url
UpperCamelCase__ : int = e.args[0]
UpperCamelCase__ : Tuple = (max_retry_error.args[0].replace('''10.255.255.1''' , F"OfflineMock[{url}]" ),)
UpperCamelCase__ : Union[str, Any] = (max_retry_error,)
raise
def raise_connection_error(SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=SCREAMING_SNAKE_CASE )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , SCREAMING_SNAKE_CASE ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , SCREAMING_SNAKE_CASE ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def _a ( *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
UpperCamelCase__ : int = str(Path().resolve() )
with tempfile.TemporaryDirectory(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) as tmp_dir:
try:
os.chdir(SCREAMING_SNAKE_CASE )
yield
finally:
os.chdir(SCREAMING_SNAKE_CASE )
@contextmanager
def _a ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase__ : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _a ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase__ : Tuple = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return deepcopy(SCREAMING_SNAKE_CASE ).integers(0 , 100 , 10 ).tolist() == deepcopy(SCREAMING_SNAKE_CASE ).integers(0 , 100 , 10 ).tolist()
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(SCREAMING_SNAKE_CASE : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
try:
return func(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
except HTTPError as err:
if str(SCREAMING_SNAKE_CASE ).startswith('''500''' ) or str(SCREAMING_SNAKE_CASE ).startswith('''502''' ):
pytest.xfail(str(SCREAMING_SNAKE_CASE ) )
raise err
return decorator.decorator(_wrapper , SCREAMING_SNAKE_CASE )
class __magic_name__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = returncode
UpperCamelCase__ : Tuple = stdout
UpperCamelCase__ : Any = stderr
async def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
while True:
UpperCamelCase__ : List[Any] = await stream.readline()
if line:
callback(SCREAMING_SNAKE_CASE )
else:
break
async def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Optional[int]=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Optional[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : str = []
def tee(SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any]="" ):
UpperCamelCase__ : int = line.decode('''utf-8''' ).rstrip()
sink.append(SCREAMING_SNAKE_CASE )
if not quiet:
print(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , file=SCREAMING_SNAKE_CASE )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda SCREAMING_SNAKE_CASE : tee(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda SCREAMING_SNAKE_CASE : tee(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , sys.stderr , label='''stderr:''' ) ),
] , timeout=SCREAMING_SNAKE_CASE , )
return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : str=180 , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : List[str]=True ):
"""simple docstring"""
UpperCamelCase__ : str = asyncio.get_event_loop()
UpperCamelCase__ : Any = loop.run_until_complete(
_stream_subprocess(SCREAMING_SNAKE_CASE , env=SCREAMING_SNAKE_CASE , stdin=SCREAMING_SNAKE_CASE , timeout=SCREAMING_SNAKE_CASE , quiet=SCREAMING_SNAKE_CASE , echo=SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : List[Any] = ''' '''.join(SCREAMING_SNAKE_CASE )
if result.returncode > 0:
UpperCamelCase__ : Optional[int] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"'{cmd_str}' produced no output." )
return result
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : int = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
UpperCamelCase__ : Tuple = re.sub(r'''^gw''' , '''''' , SCREAMING_SNAKE_CASE , 0 , re.M )
return int(SCREAMING_SNAKE_CASE )
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = 29500
UpperCamelCase__ : Optional[Any] = pytest_xdist_worker_id()
return port + uniq_delta
| 720
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCamelCase : List[Any] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
__UpperCamelCase : int = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
__UpperCamelCase : int = "▁"
class __magic_name__ ( __lowerCAmelCase):
A: Tuple = VOCAB_FILES_NAMES
A: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A: Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple="<s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : int="</s>" , lowerCamelCase__ : Optional[Any]="<s>" , lowerCamelCase__ : List[str]="<unk>" , lowerCamelCase__ : str="<pad>" , lowerCamelCase__ : int="<mask>" , lowerCamelCase__ : Optional[Dict[str, Any]] = None , **lowerCamelCase__ : Any , ) -> None:
'''simple docstring'''
UpperCamelCase__ : str = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
UpperCamelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
UpperCamelCase__ : Any = vocab_file
UpperCamelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
UpperCamelCase__ : Any = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
UpperCamelCase__ : Tuple = len(self.sp_model ) - 1
UpperCamelCase__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ : List[str] = [self.cls_token_id]
UpperCamelCase__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCamelCase__ : int = [self.sep_token_id]
UpperCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Tuple = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ : List[str] = self.sp_model.PieceToId(lowerCamelCase__ )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : int ) -> List[str]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(lowerCamelCase__ )
def UpperCAmelCase__ ( self : str , lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Any = ''''''
UpperCamelCase__ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
UpperCamelCase__ : str = True
UpperCamelCase__ : Tuple = []
else:
current_sub_tokens.append(lowerCamelCase__ )
UpperCamelCase__ : Any = False
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def __getstate__( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : str = self.__dict__.copy()
UpperCamelCase__ : int = None
return state
def __setstate__( self : Tuple , lowerCamelCase__ : Any ) -> str:
'''simple docstring'''
UpperCamelCase__ : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ : Optional[Any] = {}
UpperCamelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase__ : Any = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
UpperCamelCase__ : int = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
| 106
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self : str ,A : Tuple ,A : List[str]=13 ,A : List[str]=7 ,A : Union[str, Any]=True ,A : Tuple=True ,A : List[Any]=False ,A : int=True ,A : str=99 ,A : int=32 ,A : Optional[Any]=5 ,A : int=4 ,A : int=37 ,A : Union[str, Any]="gelu" ,A : Dict=0.1 ,A : Optional[int]=0.1 ,A : List[str]=512 ,A : Union[str, Any]=16 ,A : int=2 ,A : Optional[Any]=0.0_2 ,A : Optional[int]=3 ,A : Dict=4 ,A : Tuple=None ,):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Any = use_input_mask
UpperCAmelCase__ : List[Any] = use_token_type_ids
UpperCAmelCase__ : List[str] = use_labels
UpperCAmelCase__ : Optional[int] = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Optional[Any] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : List[str] = type_sequence_label_size
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : List[str] = scope
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : str = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase__ : str = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase__ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : str ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__a ,initializer_range=self.initializer_range ,use_stable_embedding=__a ,)
def __lowercase ( self : List[str] ,A : Any ,A : List[Any] ,A : str ,A : List[str] ,A : int ,A : str ,A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = OpenLlamaModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ : int = model(__a ,attention_mask=__a )
UpperCAmelCase__ : List[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Optional[Any] ,A : Tuple ,A : Tuple ,A : Tuple ,A : Union[str, Any] ,A : List[str] ,A : int ,A : Union[str, Any] ,A : Optional[Any] ,A : int ,):
'''simple docstring'''
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Optional[int] = OpenLlamaModel(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ : Tuple = model(
__a ,attention_mask=__a ,encoder_hidden_states=__a ,encoder_attention_mask=__a ,)
UpperCAmelCase__ : Dict = model(
__a ,attention_mask=__a ,encoder_hidden_states=__a ,)
UpperCAmelCase__ : Optional[int] = model(__a ,attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Optional[int] ,A : Dict ,A : Tuple ,A : Optional[Any] ,A : Dict ,A : str ,A : Optional[Any] ,A : Optional[Any] ,A : Any ,A : Tuple ,):
'''simple docstring'''
UpperCAmelCase__ : List[str] = OpenLlamaForCausalLM(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(__a ,attention_mask=__a ,labels=__a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : Tuple ,A : Optional[int] ,A : Optional[int] ,A : List[Any] ,A : Any ,A : Any ,A : Union[str, Any] ,A : Optional[int] ,A : List[Any] ,A : str ,):
'''simple docstring'''
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Dict = OpenLlamaForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
UpperCAmelCase__ : List[Any] = model(
__a ,attention_mask=__a ,encoder_hidden_states=__a ,encoder_attention_mask=__a ,use_cache=__a ,)
UpperCAmelCase__ : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase__ : List[str] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
UpperCAmelCase__ : List[str] = torch.cat([input_ids, next_tokens] ,dim=-1 )
UpperCAmelCase__ : List[str] = torch.cat([input_mask, next_mask] ,dim=-1 )
UpperCAmelCase__ : Tuple = model(
__a ,attention_mask=__a ,encoder_hidden_states=__a ,encoder_attention_mask=__a ,output_hidden_states=__a ,)["""hidden_states"""][0]
UpperCAmelCase__ : List[str] = model(
__a ,attention_mask=__a ,encoder_hidden_states=__a ,encoder_attention_mask=__a ,past_key_values=__a ,output_hidden_states=__a ,)["""hidden_states"""][0]
# select random slice
UpperCAmelCase__ : str = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
UpperCAmelCase__ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a ,__a ,atol=1e-3 ) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase__ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
snake_case_ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = OpenLlamaModelTester(self )
UpperCAmelCase__ : int = ConfigTester(self ,config_class=__a ,hidden_size=37 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Optional[Any] = type
self.model_tester.create_and_check_model(*__a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : Optional[int] = input_dict["""input_ids"""]
UpperCAmelCase__ : int = input_ids.ne(1 ).to(__a )
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__a ,attention_mask=__a ,labels=__a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : List[str] = """single_label_classification"""
UpperCAmelCase__ : Any = input_dict["""input_ids"""]
UpperCAmelCase__ : str = input_ids.ne(1 ).to(__a )
UpperCAmelCase__ : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
UpperCAmelCase__ : Optional[Any] = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ : int = model(__a ,attention_mask=__a ,labels=__a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Tuple = 3
UpperCAmelCase__ : Optional[int] = """multi_label_classification"""
UpperCAmelCase__ : Optional[int] = input_dict["""input_ids"""]
UpperCAmelCase__ : List[str] = input_ids.ne(1 ).to(__a )
UpperCAmelCase__ : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase__ : Optional[Any] = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase__ : List[Any] = model(__a ,attention_mask=__a ,labels=__a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def __lowercase ( self : int ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase__ : Optional[int] = OpenLlamaModel(__a )
original_model.to(__a )
original_model.eval()
UpperCAmelCase__ : str = original_model(__a ).last_hidden_state
UpperCAmelCase__ : int = original_model(__a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase__ : Any = {"""type""": scaling_type, """factor""": 10.0}
UpperCAmelCase__ : Union[str, Any] = OpenLlamaModel(__a )
scaled_model.to(__a )
scaled_model.eval()
UpperCAmelCase__ : Union[str, Any] = scaled_model(__a ).last_hidden_state
UpperCAmelCase__ : Any = scaled_model(__a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__a ,__a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__a ,__a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__a ,__a ,atol=1e-5 ) )
| 65
|
'''simple docstring'''
from __future__ import annotations
__lowerCAmelCase : List[str] = 8.988e9 # units = N * m^s * C^-2
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ):
"""simple docstring"""
__UpperCAmelCase = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
__UpperCAmelCase = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__UpperCAmelCase = abs(UpperCamelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__UpperCAmelCase = abs(UpperCamelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__UpperCAmelCase = (COULOMBS_CONSTANT * charge_product / abs(UpperCamelCase__ )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262
| 0
|
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
A__ : Union[str, Any] = logging.getLogger(__name__)
A__ : List[str] = tf.data.AUTOTUNE
def a_ ( ) -> Tuple:
__snake_case : str = argparse.ArgumentParser(description='Train a masked language model on TPU.' )
parser.add_argument(
'--pretrained_model_config' ,type=_UpperCAmelCase ,default='roberta-base' ,help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' ,)
parser.add_argument(
'--tokenizer' ,type=_UpperCAmelCase ,default='unigram-tokenizer-wikitext' ,help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' ,)
parser.add_argument(
'--per_replica_batch_size' ,type=_UpperCAmelCase ,default=8 ,help='Batch size per TPU core.' ,)
parser.add_argument(
'--no_tpu' ,action='store_true' ,help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' ,)
parser.add_argument(
'--tpu_name' ,type=_UpperCAmelCase ,help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' ,default='local' ,)
parser.add_argument(
'--tpu_zone' ,type=_UpperCAmelCase ,help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' ,)
parser.add_argument(
'--gcp_project' ,type=_UpperCAmelCase ,help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16' ,action='store_true' ,help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' ,)
parser.add_argument(
'--train_dataset' ,type=_UpperCAmelCase ,help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' ,)
parser.add_argument(
'--shuffle_buffer_size' ,type=_UpperCAmelCase ,default=2**18 ,help='Size of the shuffle buffer (in samples)' ,)
parser.add_argument(
'--eval_dataset' ,type=_UpperCAmelCase ,help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' ,)
parser.add_argument(
'--num_epochs' ,type=_UpperCAmelCase ,default=1 ,help='Number of epochs to train for.' ,)
parser.add_argument(
'--learning_rate' ,type=_UpperCAmelCase ,default=1E-4 ,help='Learning rate to use for training.' ,)
parser.add_argument(
'--weight_decay_rate' ,type=_UpperCAmelCase ,default=1E-3 ,help='Weight decay rate to use for training.' ,)
parser.add_argument(
'--max_length' ,type=_UpperCAmelCase ,default=5_12 ,help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' ,)
parser.add_argument(
'--mlm_probability' ,type=_UpperCAmelCase ,default=0.1_5 ,help='Fraction of tokens to mask during training.' ,)
parser.add_argument('--output_dir' ,type=_UpperCAmelCase ,required=_UpperCAmelCase ,help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id' ,type=_UpperCAmelCase ,help='Model ID to upload to on the Hugging Face Hub.' )
__snake_case : Dict = parser.parse_args()
return args
def a_ ( _UpperCAmelCase : int ) -> str:
try:
if args.tpu_name:
__snake_case : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name ,zone=args.tpu_zone ,project=args.gcp_project )
else:
__snake_case : Any = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(_UpperCAmelCase )
tf.tpu.experimental.initialize_tpu_system(_UpperCAmelCase )
return tpu
def a_ ( _UpperCAmelCase : int ) -> Optional[int]:
__snake_case : Union[str, Any] = 0
for file in file_list:
__snake_case : Optional[int] = file.split('/' )[-1]
__snake_case : Optional[Any] = re.search(r'-\d+-(\d+)\.tfrecord' ,_UpperCAmelCase ).group(1 )
__snake_case : Any = int(_UpperCAmelCase )
num_samples += sample_count
return num_samples
def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple=None ) -> Dict:
__snake_case : int = count_samples(_UpperCAmelCase )
__snake_case : Optional[Any] = tf.data.Dataset.from_tensor_slices(_UpperCAmelCase )
if shuffle:
__snake_case : Optional[Any] = dataset.shuffle(len(_UpperCAmelCase ) )
__snake_case : Dict = tf.data.TFRecordDataset(_UpperCAmelCase ,num_parallel_reads=_UpperCAmelCase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
__snake_case : Any = dataset.apply(tf.data.experimental.assert_cardinality(_UpperCAmelCase ) )
__snake_case : Dict = dataset.map(_UpperCAmelCase ,num_parallel_calls=_UpperCAmelCase )
if shuffle:
assert shuffle_buffer_size is not None
__snake_case : Optional[Any] = dataset.shuffle(args.shuffle_buffer_size )
__snake_case : Any = dataset.batch(_UpperCAmelCase ,drop_remainder=_UpperCAmelCase )
__snake_case : int = dataset.map(_UpperCAmelCase ,num_parallel_calls=_UpperCAmelCase )
__snake_case : Optional[int] = dataset.prefetch(_UpperCAmelCase )
return dataset
def a_ ( _UpperCAmelCase : int ) -> List[Any]:
if not args.no_tpu:
__snake_case : Tuple = initialize_tpu(_UpperCAmelCase )
__snake_case : Optional[Any] = tf.distribute.TPUStrategy(_UpperCAmelCase )
else:
__snake_case : Optional[int] = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
__snake_case : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer )
__snake_case : int = AutoConfig.from_pretrained(args.pretrained_model_config )
__snake_case : List[Any] = tokenizer.vocab_size
__snake_case : Any = tf.io.gfile.glob(os.path.join(args.train_dataset ,'*.tfrecord' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
__snake_case : List[str] = tf.io.gfile.glob(os.path.join(args.eval_dataset ,'*.tfrecord' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
__snake_case : Optional[int] = count_samples(_UpperCAmelCase )
__snake_case : Optional[Any] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
__snake_case : Dict = steps_per_epoch * args.num_epochs
with strategy.scope():
__snake_case : int = TFAutoModelForMaskedLM.from_config(_UpperCAmelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
__snake_case : int = create_optimizer(
num_train_steps=_UpperCAmelCase ,num_warmup_steps=total_train_steps // 20 ,init_lr=args.learning_rate ,weight_decay_rate=args.weight_decay_rate ,)
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_UpperCAmelCase ,metrics=['accuracy'] )
def decode_fn(_UpperCAmelCase : Optional[Any] ):
__snake_case : Optional[Any] = {
'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa ,shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa ,shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_UpperCAmelCase ,_UpperCAmelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
__snake_case : Optional[Any] = DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase ,mlm_probability=args.mlm_probability ,mlm=_UpperCAmelCase ,return_tensors='tf' )
def mask_with_collator(_UpperCAmelCase : List[str] ):
# TF really needs an isin() function
__snake_case : int = (
~tf.cast(batch['attention_mask'] ,tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
__snake_case : str = data_collator.tf_mask_tokens(
batch['input_ids'] ,vocab_size=len(_UpperCAmelCase ) ,mask_token_id=tokenizer.mask_token_id ,special_tokens_mask=_UpperCAmelCase ,)
return batch
__snake_case : int = args.per_replica_batch_size * strategy.num_replicas_in_sync
__snake_case : Optional[int] = prepare_dataset(
_UpperCAmelCase ,decode_fn=_UpperCAmelCase ,mask_fn=_UpperCAmelCase ,batch_size=_UpperCAmelCase ,shuffle=_UpperCAmelCase ,shuffle_buffer_size=args.shuffle_buffer_size ,)
__snake_case : List[Any] = prepare_dataset(
_UpperCAmelCase ,decode_fn=_UpperCAmelCase ,mask_fn=_UpperCAmelCase ,batch_size=_UpperCAmelCase ,shuffle=_UpperCAmelCase ,)
__snake_case : Tuple = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir ,hub_model_id=args.hub_model_id ,tokenizer=_UpperCAmelCase ) )
model.fit(
_UpperCAmelCase ,validation_data=_UpperCAmelCase ,epochs=args.num_epochs ,callbacks=_UpperCAmelCase ,)
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
A__ : List[str] = parse_args()
main(args)
| 720
|
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
A__ : int = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Union[str, Any] ) -> Any:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(_UpperCAmelCase ) ,version.parse(_UpperCAmelCase ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ) -> None:
__snake_case : Optional[Any] = f'''\n{hint}''' if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$' ,_UpperCAmelCase ):
__snake_case , __snake_case , __snake_case : Optional[int] = requirement, None, None
else:
__snake_case : Dict = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' ,_UpperCAmelCase )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
f''' got {requirement}''' )
__snake_case , __snake_case : Tuple = match[0]
__snake_case : Tuple = want_full.split(',' ) # there could be multiple requirements
__snake_case : Any = {}
for w in want_range:
__snake_case : Optional[Any] = re.findall(r'^([\s!=<>]{1,2})(.+)' ,_UpperCAmelCase )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
f''' but got {requirement}''' )
__snake_case , __snake_case : str = match[0]
__snake_case : Optional[int] = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__snake_case : Any = '.'.join([str(_UpperCAmelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
return
# check if any version is installed
try:
__snake_case : Dict = importlib.metadata.version(_UpperCAmelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def a_ ( _UpperCAmelCase : Tuple ) -> Optional[Any]:
__snake_case : Union[str, Any] = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(_UpperCAmelCase ,_UpperCAmelCase )
| 124
| 0
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : List[Any] = 16
__lowercase : Tuple = 32
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
return int(x / 2**20 )
class __lowercase :
def __enter__(self ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCamelCase_ : List[Any] = torch.cuda.memory_allocated()
return self
def __exit__(self , *A ):
gc.collect()
torch.cuda.empty_cache()
lowerCamelCase_ : Optional[int] = torch.cuda.memory_allocated()
lowerCamelCase_ : List[Any] = torch.cuda.max_memory_allocated()
lowerCamelCase_ : List[Any] = bamb(self.end - self.begin )
lowerCamelCase_ : Optional[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowercase_ ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" , _lowercase = 320 , _lowercase = 160 , ) -> str:
'''simple docstring'''
lowerCamelCase_ : Tuple = AutoTokenizer.from_pretrained(_lowercase )
lowerCamelCase_ : int = load_dataset(
'''glue''' , '''mrpc''' , split={'''train''': F"""train[:{n_train}]""", '''validation''': F"""validation[:{n_val}]"""} )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase_ : Optional[int] = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCamelCase_ : Any = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
lowerCamelCase_ : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def lowercase_ ( _lowercase , _lowercase ) -> Any:
'''simple docstring'''
lowerCamelCase_ : List[str] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ : str = config['''lr''']
lowerCamelCase_ : Union[str, Any] = int(config['''num_epochs'''] )
lowerCamelCase_ : List[Any] = int(config['''seed'''] )
lowerCamelCase_ : Union[str, Any] = int(config['''batch_size'''] )
lowerCamelCase_ : Dict = args.model_name_or_path
set_seed(_lowercase )
lowerCamelCase_, lowerCamelCase_ : str = get_dataloaders(_lowercase , _lowercase , _lowercase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
lowerCamelCase_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase_ : List[str] = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase_ : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCamelCase_ : Any = 1
lowerCamelCase_ : Union[str, Any] = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase_ : Dict = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
lowerCamelCase_ : Tuple = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase_ : List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase_ : List[Any] = 0
# Now we train the model
lowerCamelCase_ : Dict = {}
for epoch in range(_lowercase , _lowercase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowercase ):
lowerCamelCase_ : int = model(**_lowercase )
lowerCamelCase_ : List[Any] = outputs.loss
lowerCamelCase_ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCamelCase_ : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def lowercase_ ( ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--peak_memory_upper_bound''' , type=_lowercase , default=_lowercase , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
parser.add_argument(
'''--n_train''' , type=_lowercase , default=320 , help='''Number of training examples to use.''' , )
parser.add_argument(
'''--n_val''' , type=_lowercase , default=160 , help='''Number of validation examples to use.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowercase , default=1 , help='''Number of train epochs.''' , )
lowerCamelCase_ : str = parser.parse_args()
lowerCamelCase_ : str = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 422
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
lowerCamelCase_ : Tuple = {
'''input_ids''': tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
lowerCamelCase_ : int = model(A )['''last_hidden_state''']
lowerCamelCase_ : List[Any] = tf.TensorShape((1, 6, 7_6_8) )
self.assertEqual(output.shape , A )
# compare the actual values for a slice.
lowerCamelCase_ : Dict = tf.convert_to_tensor(
[
[
[0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
[-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
[-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 422
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , ) -> Any:
_lowercase : List[Any] = {}
if train_file is not None:
_lowercase : str = [train_file]
if eval_file is not None:
_lowercase : List[Any] = [eval_file]
if test_file is not None:
_lowercase : Union[str, Any] = [test_file]
_lowercase : str = datasets.load_dataset('csv' , data_files=lowerCamelCase_ )
_lowercase : str = list(ds[list(files.keys() )[0]].features.keys() )
_lowercase : str = features_name.pop(lowerCamelCase_ )
_lowercase : Optional[int] = list(set(ds[list(files.keys() )[0]][label_name] ) )
_lowercase : int = {label: i for i, label in enumerate(lowerCamelCase_ )}
_lowercase : Union[str, Any] = tokenizer.model_input_names
_lowercase : Dict = {}
if len(lowerCamelCase_ ) == 1:
for k in files.keys():
_lowercase : Tuple = ds[k].map(
lambda lowerCamelCase_ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' ) , batched=lowerCamelCase_ , )
elif len(lowerCamelCase_ ) == 2:
for k in files.keys():
_lowercase : str = ds[k].map(
lambda lowerCamelCase_ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , ) , batched=lowerCamelCase_ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_lowercase : Tuple = {k: v for k, v in ex.items() if k in input_names}
_lowercase : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_lowercase : List[str] = {k: v for k, v in ex.items() if k in input_names}
_lowercase : str = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_lowercase : Dict = {k: v for k, v in ex.items() if k in input_names}
_lowercase : int = labelaid[ex[label_name]]
yield (d, label)
_lowercase : int = (
tf.data.Dataset.from_generator(
lowerCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_lowercase : Union[str, Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_lowercase : Any = (
tf.data.Dataset.from_generator(
lowerCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_lowercase : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_lowercase : List[str] = (
tf.data.Dataset.from_generator(
lowerCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_lowercase : Optional[int] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
lowercase_ : int = field(metadata={"""help""": """Which column contains the label"""} )
lowercase_ : str = field(default=_a, metadata={"""help""": """The path of the training file"""} )
lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """The path of the development file"""} )
lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """The path of the test file"""} )
lowercase_ : int = field(
default=1_28, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class _lowerCamelCase:
lowercase_ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase_ : bool = field(default=_a, metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
def UpperCamelCase_( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_lowercase , _lowercase , _lowercase : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
F'''16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowercase , _lowercase , _lowercase , _lowercase : Dict = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowerCamelCase_ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_lowercase : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowerCamelCase_ ) , labelaid=lowerCamelCase_ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_lowercase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowerCamelCase_ ) -> Dict:
_lowercase : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_lowercase : int = TFTrainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowercase : Optional[int] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Union[str, Any] = trainer.evaluate()
_lowercase : Union[str, Any] = os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
results.update(lowerCamelCase_ )
return results
if __name__ == "__main__":
main()
| 354
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=2, lowerCamelCase=8, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=99, lowerCamelCase=16, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=36, lowerCamelCase="gelu", lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=5_12, lowerCamelCase=16, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=3, lowerCamelCase=4, lowerCamelCase=None, ) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : List[str] = batch_size
_lowercase : Dict = seq_length
_lowercase : List[str] = is_training
_lowercase : Any = use_input_mask
_lowercase : Any = use_token_type_ids
_lowercase : Optional[int] = use_labels
_lowercase : str = vocab_size
_lowercase : str = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Union[str, Any] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Optional[Any] = initializer_range
_lowercase : Dict = num_labels
_lowercase : Dict = num_choices
_lowercase : int = scope
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : List[Any] = None
if self.use_input_mask:
_lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : List[Any] = None
if self.use_token_type_ids:
_lowercase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowercase : Optional[Any] = None
_lowercase : Optional[int] = None
_lowercase : Dict = None
if self.use_labels:
_lowercase : Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Tuple = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, )
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = self.get_config()
_lowercase : List[str] = 3_00
return config
def UpperCamelCase ( self) -> int:
"""simple docstring"""
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : List[str] = self.prepare_config_and_inputs()
_lowercase : Tuple = True
_lowercase : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Dict = MraModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase)
_lowercase : Optional[Any] = model(lowerCamelCase, token_type_ids=lowerCamelCase)
_lowercase : Tuple = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : int = True
_lowercase : List[Any] = MraModel(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Tuple = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, )
_lowercase : Union[str, Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, encoder_hidden_states=lowerCamelCase, )
_lowercase : Any = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Optional[Any] = MraForMaskedLM(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Optional[Any] = MraForQuestionAnswering(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Union[str, Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : str = self.num_labels
_lowercase : List[Any] = MraForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_labels
_lowercase : str = MraForTokenClassification(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Any = self.num_choices
_lowercase : Optional[Any] = MraForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : str = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Dict = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Dict = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Tuple = config_and_inputs
_lowercase : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase_ : Optional[int] = False
lowercase_ : Optional[int] = False
lowercase_ : int = False
lowercase_ : Optional[Any] = False
lowercase_ : Any = ()
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[str] = MraModelTester(self)
_lowercase : Any = ConfigTester(self, config_class=lowerCamelCase, hidden_size=37)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase : Optional[int] = type
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[str] = MraModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@unittest.skip(reason='MRA does not output attentions')
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = MraModel.from_pretrained('uw-madison/mra-base-512-4')
_lowercase : Any = torch.arange(2_56).unsqueeze(0)
with torch.no_grad():
_lowercase : str = model(lowerCamelCase)[0]
_lowercase : Union[str, Any] = torch.Size((1, 2_56, 7_68))
self.assertEqual(output.shape, lowerCamelCase)
_lowercase : Dict = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : List[str] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
_lowercase : Optional[int] = torch.arange(2_56).unsqueeze(0)
with torch.no_grad():
_lowercase : str = model(lowerCamelCase)[0]
_lowercase : Union[str, Any] = 5_02_65
_lowercase : int = torch.Size((1, 2_56, vocab_size))
self.assertEqual(output.shape, lowerCamelCase)
_lowercase : Optional[Any] = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]])
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Any = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
_lowercase : Optional[int] = torch.arange(40_96).unsqueeze(0)
with torch.no_grad():
_lowercase : str = model(lowerCamelCase)[0]
_lowercase : Optional[Any] = 5_02_65
_lowercase : Union[str, Any] = torch.Size((1, 40_96, vocab_size))
self.assertEqual(output.shape, lowerCamelCase)
_lowercase : Any = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]])
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase, atol=1E-4))
| 354
| 1
|
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> Union[str, Any]:
assert x is not None
assert y is not None
__A : str = len(a__ )
__A : Tuple = len(a__ )
# declaring the array for storing the dp values
__A : Dict = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 ,m + 1 ):
for j in range(1 ,n + 1 ):
__A : str = 1 if x[i - 1] == y[j - 1] else 0
__A : str = max(l[i - 1][j] ,l[i][j - 1] ,l[i - 1][j - 1] + match )
__A : Dict = """"""
__A , __A : List[Any] = m, n
while i > 0 and j > 0:
__A : Optional[int] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__A : Dict = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = '''AGGTAB'''
UpperCAmelCase_ : Optional[Any] = '''GXTXAYB'''
UpperCAmelCase_ : Union[str, Any] = 4
UpperCAmelCase_ : Union[str, Any] = '''GTAB'''
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
| 17
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> float:
"""simple docstring"""
_UpperCamelCase : int = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowercase__ ( ) -> Any:
"""simple docstring"""
print(sum_of_series(1 ,1 ,10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
| 51
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , a__ : int , a__ : int ):
__magic_name__ = jnp.ones((batch_size, length) ) / length
return scores
def snake_case__ ( self : List[str] ):
__magic_name__ = None
__magic_name__ = 20
__magic_name__ = self._get_uniform_logits(batch_size=2 , length=a__ )
# tweak scores to not be uniform anymore
__magic_name__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__magic_name__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__magic_name__ = jax.nn.softmax(a__ , axis=-1 )
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
__magic_name__ = jax.nn.softmax(temp_dist_warper_sharper(a__ , scores.copy() , cur_len=a__ ) , axis=-1 )
__magic_name__ = jax.nn.softmax(temp_dist_warper_smoother(a__ , scores.copy() , cur_len=a__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = None
__magic_name__ = 10
__magic_name__ = 2
# create ramp distribution
__magic_name__ = np.broadcast_to(np.arange(a__ )[None, :] , (batch_size, vocab_size) ).copy()
__magic_name__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = top_k_warp(a__ , a__ , cur_len=a__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__magic_name__ = 5
__magic_name__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__magic_name__ = np.broadcast_to(np.arange(a__ )[None, :] , (batch_size, length) ).copy()
__magic_name__ = top_k_warp_safety_check(a__ , a__ , cur_len=a__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case__ ( self : Tuple ):
__magic_name__ = None
__magic_name__ = 10
__magic_name__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__magic_name__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
__magic_name__ = np.exp(top_p_warp(a__ , a__ , cur_len=a__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__magic_name__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(a__ , a__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__magic_name__ = np.broadcast_to(np.arange(a__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__magic_name__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__magic_name__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__magic_name__ = top_p_warp(a__ , a__ , cur_len=a__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case__ ( self : int ):
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a__ )
# check that min length is applied at length 5
__magic_name__ = ids_tensor((batch_size, 20) , vocab_size=20 )
__magic_name__ = 5
__magic_name__ = self._get_uniform_logits(a__ , a__ )
__magic_name__ = min_dist_processor(a__ , a__ , cur_len=a__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
__magic_name__ = self._get_uniform_logits(a__ , a__ )
__magic_name__ = 15
__magic_name__ = min_dist_processor(a__ , a__ , cur_len=a__ )
self.assertFalse(jnp.isinf(a__ ).any() )
def snake_case__ ( self : str ):
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a__ )
# check that all scores are -inf except the bos_token_id score
__magic_name__ = ids_tensor((batch_size, 1) , vocab_size=20 )
__magic_name__ = 1
__magic_name__ = self._get_uniform_logits(a__ , a__ )
__magic_name__ = logits_processor(a__ , a__ , cur_len=a__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__magic_name__ = 3
__magic_name__ = self._get_uniform_logits(a__ , a__ )
__magic_name__ = logits_processor(a__ , a__ , cur_len=a__ )
self.assertFalse(jnp.isinf(a__ ).any() )
def snake_case__ ( self : List[Any] ):
__magic_name__ = 20
__magic_name__ = 4
__magic_name__ = 0
__magic_name__ = 5
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=a__ , eos_token_id=a__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__magic_name__ = ids_tensor((batch_size, 4) , vocab_size=20 )
__magic_name__ = 4
__magic_name__ = self._get_uniform_logits(a__ , a__ )
__magic_name__ = logits_processor(a__ , a__ , cur_len=a__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__magic_name__ = 3
__magic_name__ = self._get_uniform_logits(a__ , a__ )
__magic_name__ = logits_processor(a__ , a__ , cur_len=a__ )
self.assertFalse(jnp.isinf(a__ ).any() )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = 4
__magic_name__ = 10
__magic_name__ = 15
__magic_name__ = 2
__magic_name__ = 1
__magic_name__ = 15
# dummy input_ids and scores
__magic_name__ = ids_tensor((batch_size, sequence_length) , a__ )
__magic_name__ = input_ids.copy()
__magic_name__ = self._get_uniform_logits(a__ , a__ )
__magic_name__ = scores.copy()
# instantiate all dist processors
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a__ )
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a__ )
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=a__ , eos_token_id=a__ )
__magic_name__ = 10
# no processor list
__magic_name__ = temp_dist_warp(a__ , a__ , cur_len=a__ )
__magic_name__ = top_k_warp(a__ , a__ , cur_len=a__ )
__magic_name__ = top_p_warp(a__ , a__ , cur_len=a__ )
__magic_name__ = min_dist_proc(a__ , a__ , cur_len=a__ )
__magic_name__ = bos_dist_proc(a__ , a__ , cur_len=a__ )
__magic_name__ = eos_dist_proc(a__ , a__ , cur_len=a__ )
# with processor list
__magic_name__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__magic_name__ = processor(a__ , a__ , cur_len=a__ )
# scores should be equal
self.assertTrue(jnp.allclose(a__ , a__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = 4
__magic_name__ = 10
__magic_name__ = 15
__magic_name__ = 2
__magic_name__ = 1
__magic_name__ = 15
# dummy input_ids and scores
__magic_name__ = ids_tensor((batch_size, sequence_length) , a__ )
__magic_name__ = input_ids.copy()
__magic_name__ = self._get_uniform_logits(a__ , a__ )
__magic_name__ = scores.copy()
# instantiate all dist processors
__magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
__magic_name__ = FlaxTopKLogitsWarper(3 )
__magic_name__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a__ )
__magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a__ )
__magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=a__ , eos_token_id=a__ )
__magic_name__ = 10
# no processor list
def run_no_processor_list(a__ : Any , a__ : List[Any] , a__ : Any ):
__magic_name__ = temp_dist_warp(a__ , a__ , cur_len=a__ )
__magic_name__ = top_k_warp(a__ , a__ , cur_len=a__ )
__magic_name__ = top_p_warp(a__ , a__ , cur_len=a__ )
__magic_name__ = min_dist_proc(a__ , a__ , cur_len=a__ )
__magic_name__ = bos_dist_proc(a__ , a__ , cur_len=a__ )
__magic_name__ = eos_dist_proc(a__ , a__ , cur_len=a__ )
return scores
# with processor list
def run_processor_list(a__ : str , a__ : str , a__ : Any ):
__magic_name__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__magic_name__ = processor(a__ , a__ , cur_len=a__ )
return scores
__magic_name__ = jax.jit(a__ )
__magic_name__ = jax.jit(a__ )
__magic_name__ = jitted_run_no_processor_list(a__ , a__ , a__ )
__magic_name__ = jitted_run_processor_list(a__ , a__ , a__ )
# scores should be equal
self.assertTrue(jnp.allclose(a__ , a__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 432
|
'''simple docstring'''
def UpperCamelCase ( a ) -> str:
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 432
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 175
|
import os
import sys
import unittest
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a = os.path.join(git_repo_path, "src", "transformers")
a = "\n{0} = None\n"
a = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
a = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class _A ( unittest.TestCase ):
def UpperCAmelCase ( self ):
_UpperCAmelCase = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(_SCREAMING_SNAKE_CASE , """tokenizers""" )
_UpperCAmelCase = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(_SCREAMING_SNAKE_CASE , """tensorflow_text""" )
_UpperCAmelCase = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(_SCREAMING_SNAKE_CASE , """sentencepiece_and_tokenizers""" )
_UpperCAmelCase = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(_SCREAMING_SNAKE_CASE , """sentencepiece_and_tensorflow_text""" )
_UpperCAmelCase = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(_SCREAMING_SNAKE_CASE , """sentencepiece_and_tokenizers_and_vision""" )
def UpperCAmelCase ( self ):
_UpperCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , _SCREAMING_SNAKE_CASE )
self.assertIn("""tensorflow_text""" , _SCREAMING_SNAKE_CASE )
self.assertIn("""sentencepiece_and_tokenizers""" , _SCREAMING_SNAKE_CASE )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def UpperCAmelCase ( self ):
_UpperCAmelCase = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(_SCREAMING_SNAKE_CASE , """\nCONSTANT = None\n""" )
_UpperCAmelCase = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
_SCREAMING_SNAKE_CASE , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_UpperCAmelCase = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
_UpperCAmelCase = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
_UpperCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
_UpperCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , _SCREAMING_SNAKE_CASE )
| 175
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple=100 , __magic_name__ : Optional[int]=" " ) -> List[str]:
lowercase : List[Any] =text.split(__magic_name__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )]
def _lowerCAmelCase ( __magic_name__ : dict ) -> dict:
lowercase , lowercase : int =[], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
for passage in split_text(__magic_name__ ):
titles.append(title if title is not None else '''''' )
texts.append(__magic_name__ )
return {"title": titles, "text": texts}
def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : DPRContextEncoder , __magic_name__ : DPRContextEncoderTokenizerFast ) -> dict:
lowercase : Dict =ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=__magic_name__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
lowercase : Optional[int] =ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase : Tuple =load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ )
lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase : Optional[int] =Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
lowercase : Optional[Any] =dataset.map(
partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , batch_size=processing_args.batch_size , features=__magic_name__ , )
# And finally save your dataset
lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__magic_name__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ )
# And save the index
lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__magic_name__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowerCamelCase_ = field(
default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowerCamelCase_ = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowerCamelCase_ = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowerCamelCase_ = field(
default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=lowercase__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowerCamelCase_ = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = field(
default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowerCamelCase_ = field(
default=1_28 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 92
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
A = {'+', '-', '*', '/'}
A = []
for token in postfix_notation:
if token in operations:
A , A = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCamelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690
| 0
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowercase ( unittest.TestCase):
@slow
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_lowerCamelCase ):
A_ : List[str] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = FlaxAutoModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def a_ ( self : List[str] ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_lowerCamelCase ):
A_ : int = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = FlaxAutoModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def a_ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A_ : List[str] = AutoTokenizer.from_pretrained(_lowerCamelCase )
A_ : List[str] = FlaxBertModel.from_pretrained(_lowerCamelCase )
A_ : Dict = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_lowerCamelCase : Optional[int] ):
return model(**_lowerCamelCase )
eval(**_lowerCamelCase ).block_until_ready()
@slow
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
A_ : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
A_ : Tuple = FlaxRobertaModel.from_pretrained(_lowerCamelCase )
A_ : Tuple = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_lowerCamelCase : Optional[int] ):
return model(**_lowerCamelCase )
eval(**_lowerCamelCase ).block_until_ready()
def a_ ( self : str ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
A_ : Optional[int] = FlaxAutoModel.from_pretrained('''bert-base''' )
def a_ ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A_ : Optional[int] = FlaxAutoModel.from_pretrained(_lowerCamelCase , revision='''aaaaaa''' )
def a_ ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowerCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
A_ : Union[str, Any] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def a_ ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(_lowerCamelCase , '''Use `from_pt=True` to load this model''' ):
A_ : str = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 708
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCamelCase : int = logging.getLogger(__name__)
_lowerCamelCase : List[Any] = 'pytorch_model.bin'
@dataclasses.dataclass
class lowercase :
__lowerCAmelCase : str = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""})
__lowerCAmelCase : Optional[str] = dataclasses.field(
default=__UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class lowercase :
__lowerCAmelCase : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""})
__lowerCAmelCase : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""})
__lowerCAmelCase : Optional[str] = dataclasses.field(
default=__UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""})
__lowerCAmelCase : Optional[str] = dataclasses.field(
default=__UpperCAmelCase , metadata={"""help""": """The name of the task to train on."""} , )
__lowerCAmelCase : Optional[List[str]] = dataclasses.field(
default=__UpperCAmelCase , metadata={"""help""": """The list of labels for the task."""})
@dataclasses.dataclass
class lowercase :
__lowerCAmelCase : str = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""})
__lowerCAmelCase : Optional[str] = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""})
__lowerCAmelCase : Optional[str] = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
__lowerCAmelCase : Optional[int] = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
__lowerCAmelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
__lowerCAmelCase : Optional[bool] = dataclasses.field(
default=__UpperCAmelCase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
__lowerCAmelCase : Optional[bool] = dataclasses.field(
default=__UpperCAmelCase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
__lowerCAmelCase : Optional[bool] = dataclasses.field(
default=__UpperCAmelCase , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
__lowerCAmelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
__lowerCAmelCase : Optional[int] = dataclasses.field(
default=100 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
__lowerCAmelCase : Optional[int] = dataclasses.field(
default=__UpperCAmelCase , metadata={"""help""": """Random seed for initialization."""} , )
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
A_ : Optional[int] = dataset.filter(lambda _UpperCAmelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
A_ : Optional[Any] = int(eval_result * len(_UpperCAmelCase ) )
print(_UpperCAmelCase )
A_ : str = dataset.sort('''probability''' , reverse=_UpperCAmelCase )
A_ : str = dataset.select(range(_UpperCAmelCase ) )
A_ : int = dataset.remove_columns(['''label''', '''probability'''] )
A_ : int = dataset.rename_column('''prediction''' , '''label''' )
A_ : int = dataset.map(lambda _UpperCAmelCase : {"label": idalabel[example["label"]]} )
A_ : List[Any] = dataset.shuffle(seed=args.seed )
A_ : str = os.path.join(_UpperCAmelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(_UpperCAmelCase , index=_UpperCAmelCase )
else:
dataset.to_json(_UpperCAmelCase )
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
"""simple docstring"""
A_ : Any = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
A_ : str = STModelArguments(model_name_or_path=_UpperCAmelCase )
A_ : List[str] = STDataArguments(train_file=_UpperCAmelCase , infer_file=_UpperCAmelCase )
A_ : Any = STTrainingArguments(output_dir=_UpperCAmelCase )
A_ : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_UpperCAmelCase ).items():
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for key, value in kwargs.items():
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Sanity checks
A_ : Any = {}
A_ : Dict = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
A_ : str = args.train_file
A_ : str = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
A_ : List[str] = args.eval_file
for key in data_files:
A_ : Dict = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
A_ : Optional[Any] = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
A_ : List[Any] = f"""{args.output_dir}/self-train_iter-{{}}""".format
A_ : Union[str, Any] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
accelerator.wait_for_everyone()
A_ : Optional[int] = None
A_ : Union[str, Any] = None
A_ : Optional[int] = 0
A_ : List[Any] = False
# Show the progress bar
A_ : Union[str, Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
A_ : Dict = data_dir_format(_UpperCAmelCase )
assert os.path.exists(_UpperCAmelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
A_ : int = os.path.join(_UpperCAmelCase , '''stage-1''' )
A_ : List[Any] = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_UpperCAmelCase , _UpperCAmelCase ):
arguments_dict.update({key: value} )
A_ : Any = os.path.join(_UpperCAmelCase , '''best-checkpoint''' , _UpperCAmelCase )
if os.path.exists(_UpperCAmelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , _UpperCAmelCase , _UpperCAmelCase , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , _UpperCAmelCase )
finetune(**_UpperCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(_UpperCAmelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , _UpperCAmelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
A_ : List[Any] = os.path.join(_UpperCAmelCase , '''best-checkpoint''' )
A_ : Optional[int] = os.path.join(_UpperCAmelCase , '''stage-2''' )
# Update arguments_dict
A_ : int = model_path
A_ : Optional[Any] = data_files['''train''']
A_ : Optional[int] = current_output_dir
A_ : str = os.path.join(_UpperCAmelCase , '''best-checkpoint''' , _UpperCAmelCase )
if os.path.exists(_UpperCAmelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , _UpperCAmelCase , _UpperCAmelCase , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , _UpperCAmelCase )
finetune(**_UpperCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(_UpperCAmelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , _UpperCAmelCase )
A_ : Dict = iteration
A_ : List[Any] = data_dir_format(iteration + 1 )
A_ : str = AutoConfig.from_pretrained(os.path.join(_UpperCAmelCase , '''best-checkpoint''' ) )
A_ : List[Any] = config.idalabel
A_ : Any = os.path.join(_UpperCAmelCase , '''eval_results_best-checkpoint.json''' )
A_ : Union[str, Any] = os.path.join(_UpperCAmelCase , '''test_results_best-checkpoint.json''' )
assert os.path.exists(_UpperCAmelCase )
with open(_UpperCAmelCase , '''r''' ) as f:
A_ : Union[str, Any] = float(json.load(_UpperCAmelCase )[args.eval_metric] )
A_ : Union[str, Any] = os.path.join(_UpperCAmelCase , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(_UpperCAmelCase )
# Loading the dataset from local csv or json files.
A_ : List[str] = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
A_ : Any = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
shutil.copy(_UpperCAmelCase , os.path.join(_UpperCAmelCase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(_UpperCAmelCase ):
shutil.copy(_UpperCAmelCase , os.path.join(_UpperCAmelCase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
accelerator.wait_for_everyone()
A_ : int = os.path.join(_UpperCAmelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
A_ : Union[str, Any] = eval_result
if best_iteration is None:
A_ : str = new_iteration
A_ : Dict = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
A_ : int = new_iteration
A_ : Tuple = new_eval_result
A_ : Union[str, Any] = 0
else:
if new_eval_result == best_eval_result:
A_ : Union[str, Any] = new_iteration
A_ : List[str] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
A_ : int = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , _UpperCAmelCase )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_UpperCAmelCase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(_UpperCAmelCase , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_UpperCAmelCase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(_UpperCAmelCase , '''eval_results_best-iteration.json''' ) , )
| 361
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 125
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
A = logging.get_logger(__name__)
# General docstring
A = '''MobileNetV1Config'''
# Base docstring
A = '''google/mobilenet_v1_1.0_224'''
A = [1, 10_24, 7, 7]
# Image classification docstring
A = '''google/mobilenet_v1_1.0_224'''
A = '''tabby, tabby cat'''
A = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=None) -> int:
'''simple docstring'''
_lowercase : List[str] = {}
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
_lowercase : Tuple = model.mobilenet_va
else:
_lowercase : List[Any] = model
_lowercase : List[str] = 'MobilenetV1/Conv2d_0/'
_lowercase : Tuple = backbone.conv_stem.convolution.weight
_lowercase : Optional[int] = backbone.conv_stem.normalization.bias
_lowercase : str = backbone.conv_stem.normalization.weight
_lowercase : List[str] = backbone.conv_stem.normalization.running_mean
_lowercase : Union[str, Any] = backbone.conv_stem.normalization.running_var
for i in range(13):
_lowercase : str = i + 1
_lowercase : Optional[Any] = i * 2
_lowercase : str = backbone.layer[pt_index]
_lowercase : Any = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
_lowercase : int = pointer.convolution.weight
_lowercase : Dict = pointer.normalization.bias
_lowercase : Any = pointer.normalization.weight
_lowercase : Optional[Any] = pointer.normalization.running_mean
_lowercase : List[str] = pointer.normalization.running_var
_lowercase : str = backbone.layer[pt_index + 1]
_lowercase : Any = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
_lowercase : Dict = pointer.convolution.weight
_lowercase : Any = pointer.normalization.bias
_lowercase : Tuple = pointer.normalization.weight
_lowercase : List[Any] = pointer.normalization.running_mean
_lowercase : Optional[Any] = pointer.normalization.running_var
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
_lowercase : List[Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
_lowercase : Optional[int] = model.classifier.weight
_lowercase : Dict = model.classifier.bias
return tf_to_pt_map
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple) -> Dict:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.')
raise
# Load weights from TF model
_lowercase : int = tf.train.list_variables(lowerCAmelCase__)
_lowercase : Tuple = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''')
_lowercase : Tuple = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__)
_lowercase : List[str] = array
# Build TF to PyTorch weights loading map
_lowercase : str = _build_tf_to_pytorch_map(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''')
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''')
continue
_lowercase : int = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise')
_lowercase : Optional[int] = np.transpose(lowerCAmelCase__ , (2, 3, 0, 1))
elif "weights" in name:
logger.info('Transposing')
if len(pointer.shape) == 2: # copying into linear layer
_lowercase : Any = array.squeeze().transpose()
else:
_lowercase : Optional[int] = np.transpose(lowerCAmelCase__ , (3, 2, 0, 1))
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''')
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''')
_lowercase : List[Any] = torch.from_numpy(lowerCAmelCase__)
tf_weights.pop(lowerCAmelCase__ , lowerCAmelCase__)
tf_weights.pop(name + '/RMSProp' , lowerCAmelCase__)
tf_weights.pop(name + '/RMSProp_1' , lowerCAmelCase__)
tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCAmelCase__)
logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}''')
return model
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : torch.Tensor , lowerCAmelCase__ : nn.Convad) -> torch.Tensor:
'''simple docstring'''
_lowercase , _lowercase : int = features.shape[-2:]
_lowercase , _lowercase : Optional[int] = conv_layer.stride
_lowercase , _lowercase : int = conv_layer.kernel_size
if in_height % stride_height == 0:
_lowercase : Optional[Any] = max(kernel_height - stride_height , 0)
else:
_lowercase : Any = max(kernel_height - (in_height % stride_height) , 0)
if in_width % stride_width == 0:
_lowercase : Tuple = max(kernel_width - stride_width , 0)
else:
_lowercase : Any = max(kernel_width - (in_width % stride_width) , 0)
_lowercase : int = pad_along_width // 2
_lowercase : Union[str, Any] = pad_along_width - pad_left
_lowercase : List[Any] = pad_along_height // 2
_lowercase : Dict = pad_along_height - pad_top
_lowercase : Any = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCAmelCase__ , lowerCAmelCase__ , 'constant' , 0.0)
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,UpperCamelCase : MobileNetVaConfig ,UpperCamelCase : int ,UpperCamelCase : int ,UpperCamelCase : int ,UpperCamelCase : Optional[int] = 1 ,UpperCamelCase : Optional[int] = 1 ,UpperCamelCase : bool = False ,UpperCamelCase : Optional[bool] = True ,UpperCamelCase : Optional[bool or str] = True ,) -> None:
super().__init__()
_lowercase : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
_lowercase : int = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_lowercase : int = nn.Convad(
in_channels=UpperCamelCase ,out_channels=UpperCamelCase ,kernel_size=UpperCamelCase ,stride=UpperCamelCase ,padding=UpperCamelCase ,groups=UpperCamelCase ,bias=UpperCamelCase ,padding_mode='zeros' ,)
if use_normalization:
_lowercase : List[Any] = nn.BatchNormad(
num_features=UpperCamelCase ,eps=config.layer_norm_eps ,momentum=0.9_9_9_7 ,affine=UpperCamelCase ,track_running_stats=UpperCamelCase ,)
else:
_lowercase : Any = None
if use_activation:
if isinstance(UpperCamelCase ,UpperCamelCase ):
_lowercase : Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act ,UpperCamelCase ):
_lowercase : List[Any] = ACTaFN[config.hidden_act]
else:
_lowercase : List[str] = config.hidden_act
else:
_lowercase : Union[str, Any] = None
def _lowerCamelCase ( self : List[Any] ,UpperCamelCase : torch.Tensor ) -> torch.Tensor:
if self.config.tf_padding:
_lowercase : str = apply_tf_padding(UpperCamelCase ,self.convolution )
_lowercase : Tuple = self.convolution(UpperCamelCase )
if self.normalization is not None:
_lowercase : int = self.normalization(UpperCamelCase )
if self.activation is not None:
_lowercase : Optional[int] = self.activation(UpperCamelCase )
return features
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase__ : Any = MobileNetVaConfig
lowerCAmelCase__ : Any = load_tf_weights_in_mobilenet_va
lowerCAmelCase__ : Tuple = "mobilenet_v1"
lowerCAmelCase__ : Optional[int] = "pixel_values"
lowerCAmelCase__ : List[Any] = False
def _lowerCamelCase ( self : List[str] ,UpperCamelCase : Union[nn.Linear, nn.Convad] ) -> None:
if isinstance(UpperCamelCase ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase ,nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
A = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
A = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowerCAmelCase__ , )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : Tuple ,UpperCamelCase : MobileNetVaConfig ,UpperCamelCase : bool = True ) -> str:
super().__init__(UpperCamelCase )
_lowercase : Optional[int] = config
_lowercase : List[str] = 32
_lowercase : List[str] = max(int(depth * config.depth_multiplier ) ,config.min_depth )
_lowercase : Dict = MobileNetVaConvLayer(
UpperCamelCase ,in_channels=config.num_channels ,out_channels=UpperCamelCase ,kernel_size=3 ,stride=2 ,)
_lowercase : str = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_lowercase : Any = nn.ModuleList()
for i in range(13 ):
_lowercase : List[str] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_lowercase : int = max(int(depth * config.depth_multiplier ) ,config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase ,in_channels=UpperCamelCase ,out_channels=UpperCamelCase ,kernel_size=3 ,stride=strides[i] ,groups=UpperCamelCase ,) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase ,in_channels=UpperCamelCase ,out_channels=UpperCamelCase ,kernel_size=1 ,) )
_lowercase : List[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _lowerCamelCase ( self : Tuple ,UpperCamelCase : Dict ) -> Tuple:
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=UpperCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def _lowerCamelCase ( self : int ,UpperCamelCase : Optional[torch.Tensor] = None ,UpperCamelCase : Optional[bool] = None ,UpperCamelCase : Optional[bool] = None ,) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
_lowercase : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_lowercase : str = self.conv_stem(UpperCamelCase )
_lowercase : str = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_lowercase : Optional[Any] = layer_module(UpperCamelCase )
if output_hidden_states:
_lowercase : List[str] = all_hidden_states + (hidden_states,)
_lowercase : Union[str, Any] = hidden_states
if self.pooler is not None:
_lowercase : str = torch.flatten(self.pooler(UpperCamelCase ) ,start_dim=1 )
else:
_lowercase : List[Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase ,pooler_output=UpperCamelCase ,hidden_states=UpperCamelCase ,)
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase__ , )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[int] ,UpperCamelCase : MobileNetVaConfig ) -> None:
super().__init__(UpperCamelCase )
_lowercase : Optional[int] = config.num_labels
_lowercase : Dict = MobileNetVaModel(UpperCamelCase )
_lowercase : Optional[Any] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_lowercase : List[str] = nn.Dropout(config.classifier_dropout_prob ,inplace=UpperCamelCase )
_lowercase : int = nn.Linear(UpperCamelCase ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=UpperCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def _lowerCamelCase ( self : int ,UpperCamelCase : Optional[torch.Tensor] = None ,UpperCamelCase : Optional[bool] = None ,UpperCamelCase : Optional[torch.Tensor] = None ,UpperCamelCase : Optional[bool] = None ,) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
_lowercase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase : str = self.mobilenet_va(UpperCamelCase ,output_hidden_states=UpperCamelCase ,return_dict=UpperCamelCase )
_lowercase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
_lowercase : Union[str, Any] = self.classifier(self.dropout(UpperCamelCase ) )
_lowercase : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowercase : Optional[Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowercase : Tuple = 'single_label_classification'
else:
_lowercase : Optional[int] = 'multi_label_classification'
if self.config.problem_type == "regression":
_lowercase : Optional[Any] = MSELoss()
if self.num_labels == 1:
_lowercase : Union[str, Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
_lowercase : str = loss_fct(UpperCamelCase ,UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
_lowercase : Optional[int] = CrossEntropyLoss()
_lowercase : Optional[Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowercase : Union[str, Any] = BCEWithLogitsLoss()
_lowercase : Optional[int] = loss_fct(UpperCamelCase ,UpperCamelCase )
if not return_dict:
_lowercase : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase ,logits=UpperCamelCase ,hidden_states=outputs.hidden_states ,)
| 125
| 1
|
import torch
from diffusers import StableDiffusionPipeline
__UpperCamelCase : Union[str, Any] = """path-to-your-trained-model"""
__UpperCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
__UpperCamelCase : List[Any] = """A photo of sks dog in a bucket"""
__UpperCamelCase : str = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 710
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
| 53
| 0
|
'''simple docstring'''
from itertools import count
def _lowerCAmelCase ( _UpperCamelCase : int = 50 ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[1] * min_block_length
for n in count(_UpperCamelCase ):
fill_count_functions.append(1 )
for block_length in range(_UpperCamelCase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 405
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
_lowerCAmelCase = (7_2_0, 1_2_8_0) # Height, Width
_lowerCAmelCase = (0.4, 0.6) # if height or width lower than this scale, drop it.
_lowerCAmelCase = 1 / 1_0_0
_lowerCAmelCase = ""
_lowerCAmelCase = ""
_lowerCAmelCase = ""
_lowerCAmelCase = 2_5_0
def _lowerCAmelCase ( ) ->None:
"""simple docstring"""
lowercase__ , lowercase__ = get_dataset(lowercase , lowercase )
for index in range(lowercase ):
lowercase__ = random.sample(range(len(lowercase ) ) , 4 )
lowercase__ , lowercase__ , lowercase__ = update_image_and_anno(
lowercase , lowercase , lowercase , lowercase , lowercase , filter_scale=lowercase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase__ = random_chars(3_2 )
lowercase__ = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase__ = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , lowercase , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowercase__ = []
for anno in new_annos:
lowercase__ = anno[3] - anno[1]
lowercase__ = anno[4] - anno[2]
lowercase__ = anno[1] + width / 2
lowercase__ = anno[2] + height / 2
lowercase__ = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(lowercase )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _lowerCAmelCase ( lowercase : str , lowercase : str ) ->tuple[list, list]:
"""simple docstring"""
lowercase__ = []
lowercase__ = []
for label_file in glob.glob(os.path.join(lowercase , '''*.txt''' ) ):
lowercase__ = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(lowercase ) as in_file:
lowercase__ = in_file.readlines()
lowercase__ = os.path.join(lowercase , F'''{label_name}.jpg''' )
lowercase__ = []
for obj_list in obj_lists:
lowercase__ = obj_list.rstrip('''\n''' ).split(''' ''' )
lowercase__ = float(obj[1] ) - float(obj[3] ) / 2
lowercase__ = float(obj[2] ) - float(obj[4] ) / 2
lowercase__ = float(obj[1] ) + float(obj[3] ) / 2
lowercase__ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase )
labels.append(lowercase )
return img_paths, labels
def _lowerCAmelCase ( lowercase : list , lowercase : list , lowercase : list[int] , lowercase : tuple[int, int] , lowercase : tuple[float, float] , lowercase : float = 0.0 , ) ->tuple[list, list, str]:
"""simple docstring"""
lowercase__ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase__ = int(scale_x * output_size[1] )
lowercase__ = int(scale_y * output_size[0] )
lowercase__ = []
lowercase__ = []
for i, index in enumerate(lowercase ):
lowercase__ = all_img_list[index]
path_list.append(lowercase )
lowercase__ = all_annos[index]
lowercase__ = cva.imread(lowercase )
if i == 0: # top-left
lowercase__ = cva.resize(lowercase , (divid_point_x, divid_point_y) )
lowercase__ = img
for bbox in img_annos:
lowercase__ = bbox[1] * scale_x
lowercase__ = bbox[2] * scale_y
lowercase__ = bbox[3] * scale_x
lowercase__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase__ = cva.resize(lowercase , (output_size[1] - divid_point_x, divid_point_y) )
lowercase__ = img
for bbox in img_annos:
lowercase__ = scale_x + bbox[1] * (1 - scale_x)
lowercase__ = bbox[2] * scale_y
lowercase__ = scale_x + bbox[3] * (1 - scale_x)
lowercase__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase__ = cva.resize(lowercase , (divid_point_x, output_size[0] - divid_point_y) )
lowercase__ = img
for bbox in img_annos:
lowercase__ = bbox[1] * scale_x
lowercase__ = scale_y + bbox[2] * (1 - scale_y)
lowercase__ = bbox[3] * scale_x
lowercase__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase__ = cva.resize(
lowercase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase__ = img
for bbox in img_annos:
lowercase__ = scale_x + bbox[1] * (1 - scale_x)
lowercase__ = scale_y + bbox[2] * (1 - scale_y)
lowercase__ = scale_x + bbox[3] * (1 - scale_x)
lowercase__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowercase__ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _lowerCAmelCase ( lowercase : int ) ->str:
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
lowercase__ = ascii_lowercase + digits
return "".join(random.choice(lowercase ) for _ in range(lowercase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 161
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["image_processor", "tokenizer"]
UpperCAmelCase_ ="BlipImageProcessor"
UpperCAmelCase_ =("BertTokenizer", "BertTokenizerFast")
def __init__( self , _A , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = False
super().__init__(_A , _A )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self , _A = None , _A = None , _A = True , _A = False , _A = None , _A = None , _A = 0 , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
SCREAMING_SNAKE_CASE_ = self.tokenizer
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
return text_encoding
# add pixel_values
SCREAMING_SNAKE_CASE_ = self.image_processor(_A , return_tensors=_A )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
else:
SCREAMING_SNAKE_CASE_ = None
if text_encoding is not None:
encoding_image_processor.update(_A )
return encoding_image_processor
def _UpperCamelCase ( self , *_A , **_A ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_A , **_A )
def _UpperCamelCase ( self , *_A , **_A ) -> Union[str, Any]:
return self.tokenizer.decode(*_A , **_A )
@property
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 597
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _A , _A=None , _A=True , _A=None , **_A ) -> Any:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = config_class
SCREAMING_SNAKE_CASE_ = has_text_modality
SCREAMING_SNAKE_CASE_ = kwargs
SCREAMING_SNAKE_CASE_ = common_properties
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE_ = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_A , _A ) , msg=F'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(_A ):
try:
setattr(_A , _A , _A )
self.parent.assertEqual(
getattr(_A , _A ) , _A , msg=F'''`{name} value {idx} expected, but was {getattr(_A , _A )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_A ):
try:
SCREAMING_SNAKE_CASE_ = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_A , _A ) , _A , msg=F'''`{name} value {idx} expected, but was {getattr(_A , _A )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE_ = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _A )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = os.path.join(_A , '''config.json''' )
config_first.to_json_file(_A )
SCREAMING_SNAKE_CASE_ = self.config_class.from_json_file(_A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = self.config_class.from_pretrained(_A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE_ = '''test'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = os.path.join(_A , _A )
config_first.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = self.config_class.from_pretrained(_A , subfolder=_A )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
SCREAMING_SNAKE_CASE_ = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _UpperCamelCase ( self ) -> List[Any]:
if self.config_class.is_composition:
return
SCREAMING_SNAKE_CASE_ = self.config_class()
self.parent.assertIsNotNone(_A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(_A )
SCREAMING_SNAKE_CASE_ = self.config_class(**_A )
SCREAMING_SNAKE_CASE_ = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
elif getattr(_A , _A ) != value:
wrong_values.append((key, getattr(_A , _A ), value) )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE_ = '''\n'''.join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )
def _UpperCamelCase ( self ) -> int:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 597
| 1
|
from collections import defaultdict
from math import gcd
def lowerCamelCase ( UpperCamelCase : int = 1_50_00_00 ) -> int:
_lowerCamelCase = defaultdict(UpperCamelCase )
_lowerCamelCase = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , UpperCamelCase , 2 ):
if gcd(UpperCamelCase , UpperCamelCase ) > 1:
continue
_lowerCamelCase = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(UpperCamelCase , limit + 1 , UpperCamelCase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 544
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def lowerCamelCase ( UpperCamelCase : int ) -> Union[str, Any]:
_lowerCamelCase = torch.exp(UpperCamelCase )
_lowerCamelCase = torch.sum(UpperCamelCase , dim=1 ) # sum of exp(x_i)
_lowerCamelCase = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(UpperCamelCase ) - B / A
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case__ : Any ) -> Optional[Any]:
super().__init__()
_lowerCamelCase = config.output_attentions
_lowerCamelCase = config.output_hidden_states
_lowerCamelCase = nn.ModuleList([BertLayer(snake_case__ ) for _ in range(config.num_hidden_layers )] )
_lowerCamelCase = nn.ModuleList([BertHighway(snake_case__ ) for _ in range(config.num_hidden_layers )] )
_lowerCamelCase = [-1 for _ in range(config.num_hidden_layers )]
def _snake_case ( self : Union[str, Any] , snake_case__ : str ) -> Optional[int]:
if (type(snake_case__ ) is float) or (type(snake_case__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCamelCase = x
else:
_lowerCamelCase = x
def _snake_case ( self : Tuple , snake_case__ : Optional[int] ) -> int:
_lowerCamelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _snake_case ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : List[str]=None , snake_case__ : Dict=None , ) -> List[str]:
_lowerCamelCase = ()
_lowerCamelCase = ()
_lowerCamelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCamelCase = all_hidden_states + (hidden_states,)
_lowerCamelCase = layer_module(
snake_case__ , snake_case__ , head_mask[i] , snake_case__ , snake_case__ )
_lowerCamelCase = layer_outputs[0]
if self.output_attentions:
_lowerCamelCase = all_attentions + (layer_outputs[1],)
_lowerCamelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCamelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCamelCase = current_outputs + (all_attentions,)
_lowerCamelCase = self.highway[i](snake_case__ )
# logits, pooled_output
if not self.training:
_lowerCamelCase = highway_exit[0]
_lowerCamelCase = entropy(snake_case__ )
_lowerCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCamelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(snake_case__ , i + 1 )
else:
_lowerCamelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCamelCase = all_hidden_states + (hidden_states,)
_lowerCamelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCamelCase = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCamelCase = outputs + (all_attentions,)
_lowerCamelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' ,SCREAMING_SNAKE_CASE_ ,)
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : Dict , snake_case__ : List[Any] ) -> Any:
super().__init__(snake_case__ )
_lowerCamelCase = config
_lowerCamelCase = BertEmbeddings(snake_case__ )
_lowerCamelCase = DeeBertEncoder(snake_case__ )
_lowerCamelCase = BertPooler(snake_case__ )
self.init_weights()
def _snake_case ( self : Tuple ) -> Tuple:
self.encoder.init_highway_pooler(self.pooler )
def _snake_case ( self : int ) -> List[Any]:
return self.embeddings.word_embeddings
def _snake_case ( self : Optional[int] , snake_case__ : List[Any] ) -> Any:
_lowerCamelCase = value
def _snake_case ( self : List[Any] , snake_case__ : int ) -> Any:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(snake_case__ )
@add_start_docstrings_to_model_forward(snake_case__ )
def _snake_case ( self : Union[str, Any] , snake_case__ : Dict=None , snake_case__ : Any=None , snake_case__ : Dict=None , snake_case__ : Any=None , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None , snake_case__ : str=None , snake_case__ : List[Any]=None , ) -> List[str]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowerCamelCase = input_ids.size()
elif inputs_embeds is not None:
_lowerCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowerCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCamelCase = torch.ones(snake_case__ , device=snake_case__ )
if encoder_attention_mask is None:
_lowerCamelCase = torch.ones(snake_case__ , device=snake_case__ )
if token_type_ids is None:
_lowerCamelCase = torch.zeros(snake_case__ , dtype=torch.long , device=snake_case__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCamelCase = self.get_extended_attention_mask(snake_case__ , snake_case__ , snake_case__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCamelCase = encoder_attention_mask[:, None, None, :]
_lowerCamelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCamelCase = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCamelCase = self.get_head_mask(snake_case__ , self.config.num_hidden_layers )
_lowerCamelCase = self.embeddings(
input_ids=snake_case__ , position_ids=snake_case__ , token_type_ids=snake_case__ , inputs_embeds=snake_case__ )
_lowerCamelCase = self.encoder(
snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
_lowerCamelCase = encoder_outputs[0]
_lowerCamelCase = self.pooler(snake_case__ )
_lowerCamelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case__ : Dict , snake_case__ : Any ) -> str:
_lowerCamelCase = message
_lowerCamelCase = exit_layer # start from 1!
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case__ : Optional[Any] ) -> Any:
super().__init__()
_lowerCamelCase = BertPooler(snake_case__ )
_lowerCamelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCamelCase = nn.Linear(config.hidden_size , config.num_labels )
def _snake_case ( self : Optional[Any] , snake_case__ : Dict ) -> Optional[Any]:
# Pooler
_lowerCamelCase = encoder_outputs[0]
_lowerCamelCase = self.pooler(snake_case__ )
# "return" pooler_output
# BertModel
_lowerCamelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCamelCase = bmodel_output[1]
_lowerCamelCase = self.dropout(snake_case__ )
_lowerCamelCase = self.classifier(snake_case__ )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' ,SCREAMING_SNAKE_CASE_ ,)
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case__ : Dict ) -> Dict:
super().__init__(snake_case__ )
_lowerCamelCase = config.num_labels
_lowerCamelCase = config.num_hidden_layers
_lowerCamelCase = DeeBertModel(snake_case__ )
_lowerCamelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(snake_case__ )
def _snake_case ( self : Union[str, Any] , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=None , snake_case__ : str=None , snake_case__ : Dict=None , snake_case__ : str=None , snake_case__ : Union[str, Any]=None , snake_case__ : int=None , snake_case__ : Union[str, Any]=-1 , snake_case__ : Any=False , ) -> Union[str, Any]:
_lowerCamelCase = self.num_layers
try:
_lowerCamelCase = self.bert(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , position_ids=snake_case__ , head_mask=snake_case__ , inputs_embeds=snake_case__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCamelCase = outputs[1]
_lowerCamelCase = self.dropout(snake_case__ )
_lowerCamelCase = self.classifier(snake_case__ )
_lowerCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCamelCase = e.message
_lowerCamelCase = e.exit_layer
_lowerCamelCase = outputs[0]
if not self.training:
_lowerCamelCase = entropy(snake_case__ )
_lowerCamelCase = []
_lowerCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCamelCase = MSELoss()
_lowerCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCamelCase = CrossEntropyLoss()
_lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCamelCase = []
for highway_exit in outputs[-1]:
_lowerCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(snake_case__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCamelCase = MSELoss()
_lowerCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCamelCase = CrossEntropyLoss()
_lowerCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(snake_case__ )
if train_highway:
_lowerCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCamelCase = (loss,) + outputs
if not self.training:
_lowerCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 544
| 1
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_snake_case = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_snake_case = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_snake_case = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
UpperCamelCase = 0.0
for i, j in zip(__A , __A ):
n_correct += 1.0 if math_equivalence.is_equiv(__A , __A ) else 0.0
UpperCamelCase = n_correct / len(__A )
return {
"accuracy": accuracy,
}
| 711
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 170
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class A:
'''simple docstring'''
def __init__( self : Dict , A_ : Optional[Any] , A_ : Optional[int]=13 , A_ : List[Any]=7 , A_ : Dict=True , A_ : Any=True , A_ : int=True , A_ : int=True , A_ : Any=99 , A_ : str=32 , A_ : str=2 , A_ : Optional[Any]=4 , A_ : Optional[int]=37 , A_ : Union[str, Any]="gelu" , A_ : Dict=0.1 , A_ : Optional[Any]=0.1 , A_ : Optional[Any]=512 , A_ : List[str]=16 , A_ : Optional[int]=2 , A_ : Optional[Any]=0.02 , A_ : List[Any]=3 , A_ : List[Any]=4 , A_ : Optional[Any]=None , A_ : Any=0 , ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = projection_dim
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
lowerCamelCase_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Dict , A_ : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] , A_ : Optional[Any] , A_ : Any , A_ : Any , A_ : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TFDPRContextEncoder(config=A_ )
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ )
lowerCamelCase_ = model(A_ , token_type_ids=A_ )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def a__ ( self : Any , A_ : List[str] , A_ : int , A_ : List[str] , A_ : List[Any] , A_ : str , A_ : Tuple , A_ : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = TFDPRQuestionEncoder(config=A_ )
lowerCamelCase_ = model(A_ , attention_mask=A_ , token_type_ids=A_ )
lowerCamelCase_ = model(A_ , token_type_ids=A_ )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def a__ ( self : Optional[Any] , A_ : List[str] , A_ : List[Any] , A_ : Optional[int] , A_ : Optional[int] , A_ : str , A_ : Union[str, Any] , A_ : int ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = TFDPRReader(config=A_ )
lowerCamelCase_ = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFDPRModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , hidden_size=37 )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*A_ )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*A_ )
@slow
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(A_ )
self.assertIsNotNone(A_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(A_ )
self.assertIsNotNone(A_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained(A_ )
self.assertIsNotNone(A_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFDPRReader.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
lowerCamelCase_ = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCamelCase_ = model(A_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 70
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __UpperCamelCase ( a : Any ) ->Union[str, Any]:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def __UpperCamelCase ( a : Any , a : Tuple ) ->List[Any]:
snake_case = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
snake_case = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
snake_case = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
snake_case = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
snake_case = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
snake_case = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
snake_case = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
snake_case = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
snake_case = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
snake_case = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
snake_case = key.replace('''image_encoder.module''' , '''flava.image_model''' )
snake_case = key.replace('''text_encoder.module''' , '''flava.text_model''' )
snake_case = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
snake_case = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
snake_case = key.replace('''text_projection''' , '''flava.text_projection''' )
snake_case = key.replace('''image_projection''' , '''flava.image_projection''' )
snake_case = value.float()
for key, value in codebook_state_dict.items():
snake_case = value
return upgrade
@torch.no_grad()
def __UpperCamelCase ( a : Tuple , a : List[str] , a : Optional[Any] , a : Tuple=None ) ->Union[str, Any]:
if config_path is not None:
snake_case = FlavaConfig.from_pretrained(a )
else:
snake_case = FlavaConfig()
snake_case = FlavaForPreTraining(a ).eval()
snake_case = convert_dalle_checkpoint(a , a , save_checkpoint=a )
if os.path.exists(a ):
snake_case = torch.load(a , map_location='''cpu''' )
else:
snake_case = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' )
snake_case = upgrade_state_dict(a , a )
hf_model.load_state_dict(a )
snake_case = hf_model.state_dict()
snake_case = count_parameters(a )
snake_case = count_parameters(a ) + count_parameters(a )
assert torch.allclose(a , a , atol=1e-3 )
hf_model.save_pretrained(a )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_lowercase = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 342
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[Any] = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 711
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : str = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 111
| 0
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :List[Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
SCREAMING_SNAKE_CASE :Tuple = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
SCREAMING_SNAKE_CASE :Dict = {
"""ctrl""": 256,
}
SCREAMING_SNAKE_CASE :List[Any] = {
"""Pregnancy""": 16_8629,
"""Christianity""": 7675,
"""Explain""": 10_6423,
"""Fitness""": 6_3440,
"""Saving""": 6_3163,
"""Ask""": 2_7171,
"""Ass""": 9_5985,
"""Joke""": 16_3509,
"""Questions""": 4_5622,
"""Thoughts""": 4_9605,
"""Retail""": 5_2342,
"""Feminism""": 16_4338,
"""Writing""": 1_1992,
"""Atheism""": 19_2263,
"""Netflix""": 4_8616,
"""Computing""": 3_9639,
"""Opinion""": 4_3213,
"""Alone""": 4_4967,
"""Funny""": 5_8917,
"""Gaming""": 4_0358,
"""Human""": 4088,
"""India""": 1331,
"""Joker""": 7_7138,
"""Diet""": 3_6206,
"""Legal""": 1_1859,
"""Norman""": 4939,
"""Tip""": 7_2689,
"""Weight""": 5_2343,
"""Movies""": 4_6273,
"""Running""": 2_3425,
"""Science""": 2090,
"""Horror""": 3_7793,
"""Confession""": 6_0572,
"""Finance""": 1_2250,
"""Politics""": 1_6360,
"""Scary""": 19_1985,
"""Support""": 1_2654,
"""Technologies""": 3_2516,
"""Teenage""": 6_6160,
"""Event""": 3_2769,
"""Learned""": 6_7460,
"""Notion""": 18_2770,
"""Wikipedia""": 3_7583,
"""Books""": 6665,
"""Extract""": 7_6050,
"""Confessions""": 10_2701,
"""Conspiracy""": 7_5932,
"""Links""": 6_3674,
"""Narcissus""": 15_0425,
"""Relationship""": 5_4766,
"""Relationships""": 13_4796,
"""Reviews""": 4_1671,
"""News""": 4256,
"""Translation""": 2_6820,
"""multilingual""": 12_8406,
}
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> Tuple:
"""simple docstring"""
UpperCamelCase_ = set()
UpperCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase_ = char
UpperCamelCase_ = set(SCREAMING_SNAKE_CASE_ )
return pairs
class __magic_name__ ( snake_case ):
UpperCamelCase_ :List[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ :List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ :int = CONTROL_CODES
def __init__( self , _lowercase , _lowercase , _lowercase="<unk>" , **_lowercase )-> Optional[Any]:
super().__init__(unk_token=_lowercase , **_lowercase )
with open(_lowercase , encoding="utf-8" ) as vocab_handle:
UpperCamelCase_ = json.load(_lowercase )
UpperCamelCase_ = {v: k for k, v in self.encoder.items()}
with open(_lowercase , encoding="utf-8" ) as merges_handle:
UpperCamelCase_ = merges_handle.read().split("\n" )[1:-1]
UpperCamelCase_ = [tuple(merge.split() ) for merge in merges]
UpperCamelCase_ = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
UpperCamelCase_ = {}
@property
def UpperCAmelCase_ ( self )-> Tuple:
return len(self.encoder )
def UpperCAmelCase_ ( self )-> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self , _lowercase )-> Tuple:
if token in self.cache:
return self.cache[token]
UpperCamelCase_ = tuple(_lowercase )
UpperCamelCase_ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
UpperCamelCase_ = get_pairs(_lowercase )
if not pairs:
return token
while True:
UpperCamelCase_ = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase_ , UpperCamelCase_ = bigram
UpperCamelCase_ = []
UpperCamelCase_ = 0
while i < len(_lowercase ):
try:
UpperCamelCase_ = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase_ = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase_ = tuple(_lowercase )
UpperCamelCase_ = new_word
if len(_lowercase ) == 1:
break
else:
UpperCamelCase_ = get_pairs(_lowercase )
UpperCamelCase_ = "@@ ".join(_lowercase )
UpperCamelCase_ = word[:-4]
UpperCamelCase_ = word
return word
def UpperCAmelCase_ ( self , _lowercase )-> Dict:
UpperCamelCase_ = []
UpperCamelCase_ = re.findall(r"\S+\n?" , _lowercase )
for token in words:
split_tokens.extend(list(self.bpe(_lowercase ).split(" " ) ) )
return split_tokens
def UpperCAmelCase_ ( self , _lowercase )-> Optional[Any]:
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self , _lowercase )-> Optional[int]:
return self.decoder.get(_lowercase , self.unk_token )
def UpperCAmelCase_ ( self , _lowercase )-> List[Any]:
UpperCamelCase_ = " ".join(_lowercase ).replace("@@ " , "" ).strip()
return out_string
def UpperCAmelCase_ ( self , _lowercase , _lowercase = None )-> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase_ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase_ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + "\n" )
UpperCamelCase_ = 0
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
UpperCamelCase_ = token_index
writer.write(" ".join(_lowercase ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 628
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE :Optional[Any] = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ ( snake_case ):
UpperCamelCase_ :Optional[int] = """mask2former"""
UpperCamelCase_ :Dict = ["""swin"""]
UpperCamelCase_ :List[Any] = {"""hidden_size""": """hidden_dim"""}
def __init__( self , _lowercase = None , _lowercase = 256 , _lowercase = 256 , _lowercase = 256 , _lowercase = 1_024 , _lowercase = "relu" , _lowercase = 6 , _lowercase = 10 , _lowercase = 8 , _lowercase = 0.0 , _lowercase = 2_048 , _lowercase = False , _lowercase = False , _lowercase = 4 , _lowercase = 255 , _lowercase = 100 , _lowercase = 0.1 , _lowercase = 2.0 , _lowercase = 5.0 , _lowercase = 5.0 , _lowercase = 12_544 , _lowercase = 3.0 , _lowercase = 0.75 , _lowercase = 0.02 , _lowercase = 1.0 , _lowercase = True , _lowercase = [4, 8, 16, 32] , _lowercase = None , **_lowercase , )-> Union[str, Any]:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
UpperCamelCase_ = CONFIG_MAPPING["swin"](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_lowercase , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = backbone_config.pop("model_type" )
UpperCamelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase_ = config_class.from_dict(_lowercase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
F"Supported model types: {','.join(self.backbones_supported )}" )
UpperCamelCase_ = backbone_config
UpperCamelCase_ = feature_size
UpperCamelCase_ = mask_feature_size
UpperCamelCase_ = hidden_dim
UpperCamelCase_ = encoder_feedforward_dim
UpperCamelCase_ = activation_function
UpperCamelCase_ = encoder_layers
UpperCamelCase_ = decoder_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = dropout
UpperCamelCase_ = dim_feedforward
UpperCamelCase_ = pre_norm
UpperCamelCase_ = enforce_input_projection
UpperCamelCase_ = common_stride
UpperCamelCase_ = ignore_value
UpperCamelCase_ = num_queries
UpperCamelCase_ = no_object_weight
UpperCamelCase_ = class_weight
UpperCamelCase_ = mask_weight
UpperCamelCase_ = dice_weight
UpperCamelCase_ = train_num_points
UpperCamelCase_ = oversample_ratio
UpperCamelCase_ = importance_sample_ratio
UpperCamelCase_ = init_std
UpperCamelCase_ = init_xavier_std
UpperCamelCase_ = use_auxiliary_loss
UpperCamelCase_ = feature_strides
UpperCamelCase_ = output_auxiliary_logits
UpperCamelCase_ = decoder_layers
super().__init__(**_lowercase )
@classmethod
def UpperCAmelCase_ ( cls , _lowercase , **_lowercase )-> Optional[int]:
return cls(
backbone_config=_lowercase , **_lowercase , )
def UpperCAmelCase_ ( self )-> Dict[str, any]:
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.backbone_config.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
| 628
| 1
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = 'Hello world! cécé herlolip'
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Any = FairseqRobertaModel.from_pretrained(_lowercase )
roberta.eval() # disable dropout
UpperCAmelCase_ : Any = roberta.model.encoder.sentence_encoder
UpperCAmelCase_ : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
UpperCAmelCase_ : Dict = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , _lowercase )
UpperCAmelCase_ : List[Any] = XLMRobertaXLForSequenceClassification(_lowercase ) if classification_head else XLMRobertaXLForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase_ : str = roberta_sent_encoder.embed_tokens.weight
UpperCAmelCase_ : int = roberta_sent_encoder.embed_positions.weight
UpperCAmelCase_ : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
UpperCAmelCase_ : Any = roberta_sent_encoder.layer_norm.weight
UpperCAmelCase_ : Any = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase_ : BertLayer = model.roberta.encoder.layer[i]
UpperCAmelCase_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
UpperCAmelCase_ : RobertaAttention = layer.attention
UpperCAmelCase_ : List[Any] = roberta_layer.self_attn_layer_norm.weight
UpperCAmelCase_ : Tuple = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCAmelCase_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
UpperCAmelCase_ : Dict = roberta_layer.self_attn.q_proj.weight
UpperCAmelCase_ : Optional[Any] = roberta_layer.self_attn.q_proj.bias
UpperCAmelCase_ : List[str] = roberta_layer.self_attn.k_proj.weight
UpperCAmelCase_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
UpperCAmelCase_ : Optional[Any] = roberta_layer.self_attn.v_proj.weight
UpperCAmelCase_ : int = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCAmelCase_ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
UpperCAmelCase_ : Optional[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
UpperCAmelCase_ : Any = roberta_layer.final_layer_norm.weight
UpperCAmelCase_ : str = roberta_layer.final_layer_norm.bias
# intermediate
UpperCAmelCase_ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCAmelCase_ : List[str] = roberta_layer.fca.weight
UpperCAmelCase_ : Optional[int] = roberta_layer.fca.bias
# output
UpperCAmelCase_ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCAmelCase_ : List[Any] = roberta_layer.fca.weight
UpperCAmelCase_ : List[Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
UpperCAmelCase_ : Dict = roberta.model.classification_heads['''mnli'''].dense.weight
UpperCAmelCase_ : int = roberta.model.classification_heads['''mnli'''].dense.bias
UpperCAmelCase_ : Optional[int] = roberta.model.classification_heads['''mnli'''].out_proj.weight
UpperCAmelCase_ : Dict = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
UpperCAmelCase_ : Dict = roberta.model.encoder.lm_head.dense.weight
UpperCAmelCase_ : str = roberta.model.encoder.lm_head.dense.bias
UpperCAmelCase_ : Dict = roberta.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase_ : Tuple = roberta.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase_ : Optional[Any] = roberta.model.encoder.lm_head.weight
UpperCAmelCase_ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase_ : torch.Tensor = roberta.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
UpperCAmelCase_ : List[str] = model(_lowercase )[0]
if classification_head:
UpperCAmelCase_ : Dict = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_lowercase ) )
else:
UpperCAmelCase_ : List[Any] = roberta.model(_lowercase )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase_ : Tuple = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
UpperCAmelCase_ : List[str] = torch.allclose(_lowercase , _lowercase , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__a = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 300
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt'}
__a = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ConvBertTokenizer
def __init__( self ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE="[UNK]" ,_SCREAMING_SNAKE_CASE="[SEP]" ,_SCREAMING_SNAKE_CASE="[PAD]" ,_SCREAMING_SNAKE_CASE="[CLS]" ,_SCREAMING_SNAKE_CASE="[MASK]" ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Tuple:
super().__init__(
_SCREAMING_SNAKE_CASE ,tokenizer_file=_SCREAMING_SNAKE_CASE ,do_lower_case=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,sep_token=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,cls_token=_SCREAMING_SNAKE_CASE ,mask_token=_SCREAMING_SNAKE_CASE ,tokenize_chinese_chars=_SCREAMING_SNAKE_CASE ,strip_accents=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,_SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,_SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,_SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE ,normalizer_state.pop('''type''' ) )
UpperCAmelCase_ : int = do_lower_case
UpperCAmelCase_ : Dict = strip_accents
UpperCAmelCase_ : Optional[Any] = tokenize_chinese_chars
UpperCAmelCase_ : Union[str, Any] = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = do_lower_case
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> Dict:
UpperCAmelCase_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]:
UpperCAmelCase_ : Tuple = [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE ,name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 300
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ (UpperCamelCase__ ):
__lowerCamelCase : Any = """vit_msn"""
def __init__( self , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.0 , a=0.0 , a=0.02 , a=1e-06 , a=224 , a=16 , a=3 , a=True , **a , ):
super().__init__(**SCREAMING_SNAKE_CASE__)
lowercase__ : Dict = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : str = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : int = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : List[str] = initializer_range
lowercase__ : int = layer_norm_eps
lowercase__ : Any = image_size
lowercase__ : List[str] = patch_size
lowercase__ : Optional[int] = num_channels
lowercase__ : Tuple = qkv_bias
| 164
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=13 , SCREAMING_SNAKE_CASE__ : int=7 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=99 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=512 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : Dict=4 , ) -> Optional[int]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_attention_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_choices
def a ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_attention_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a ( self : List[str] ) -> Union[str, Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def a ( self : Optional[Any] ) -> Dict:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = True
lowerCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = True
snake_case__ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a ( self : int ) -> Dict:
lowerCAmelCase__ = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a ( self : Tuple ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
lowerCAmelCase__ = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def a ( self : int ) -> Dict:
lowerCAmelCase__ = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )[0]
lowerCAmelCase__ = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice.
lowerCAmelCase__ = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
@slow
def a ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )[0]
# compare the actual values for a slice.
lowerCAmelCase__ = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 61
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 638
|
import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
if __name__ == "__main__":
main()
| 638
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
@staticmethod
def __UpperCAmelCase ( *__lowerCamelCase : List[str] , **__lowerCamelCase : Any ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
A__ : Tuple = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __UpperCAmelCase ( self : int , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Any ):
"""simple docstring"""
_snake_case = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
_snake_case = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ):
"""simple docstring"""
_snake_case = object_detector(examples[0] , threshold=0.0 )
_snake_case = len(__UpperCAmelCase )
self.assertGreater(__UpperCAmelCase , 0 )
self.assertEqual(
__UpperCAmelCase , [
{
'''score''': ANY(__UpperCAmelCase ),
'''label''': ANY(__UpperCAmelCase ),
'''box''': {'''xmin''': ANY(__UpperCAmelCase ), '''ymin''': ANY(__UpperCAmelCase ), '''xmax''': ANY(__UpperCAmelCase ), '''ymax''': ANY(__UpperCAmelCase )},
}
for i in range(__UpperCAmelCase )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
@require_torch
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
_snake_case = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
_snake_case = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = pipeline('''zero-shot-object-detection''' )
_snake_case = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
_snake_case = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
pass
@require_torch
@slow
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = 0.2
_snake_case = pipeline('''zero-shot-object-detection''' )
_snake_case = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__UpperCAmelCase , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = 2
_snake_case = pipeline('''zero-shot-object-detection''' )
_snake_case = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__UpperCAmelCase , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
| 103
|
def a_ (__A , __A , __A , __A ) -> int:
"""simple docstring"""
__a , __a : Any = len(__A ), len(grid[0] )
if (
min(__A , __A ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__a : Dict = 0
count += depth_first_search(__A , row + 1 , __A , __A )
count += depth_first_search(__A , row - 1 , __A , __A )
count += depth_first_search(__A , __A , col + 1 , __A )
count += depth_first_search(__A , __A , col - 1 , __A )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : Any = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 620
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowerCamelCase_ = datasets.utils.logging.get_logger(__name__)
lowerCamelCase_ = ['names', 'prefix']
lowerCamelCase_ = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
lowerCamelCase_ = ['encoding_errors', 'on_bad_lines']
lowerCamelCase_ = ['date_format']
@dataclass
class lowercase_ ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase_ = ","
lowerCamelCase_ = None
lowerCamelCase_ = "infer"
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = False
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = None
lowerCamelCase_ = "."
lowerCamelCase_ = None
lowerCamelCase_ = '"'
lowerCamelCase_ = 0
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 0
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = None
lowerCamelCase_ = 10000
lowerCamelCase_ = None
lowerCamelCase_ = "strict"
lowerCamelCase_ = "error"
lowerCamelCase_ = None
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
if self.delimiter is not None:
_SCREAMING_SNAKE_CASE = self.delimiter
if self.column_names is not None:
_SCREAMING_SNAKE_CASE = self.column_names
@property
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __lowerCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowercase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase_ = CsvConfig
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase_ ( self : int , __lowerCamelCase : Dict ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
_SCREAMING_SNAKE_CASE = data_files
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE = [files]
_SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE = [files]
_SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
_SCREAMING_SNAKE_CASE = self.config.features.arrow_schema
if all(not require_storage_cast(__lowerCamelCase ) for feature in self.config.features.values() ):
# cheaper cast
_SCREAMING_SNAKE_CASE = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__lowerCamelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_SCREAMING_SNAKE_CASE = table_cast(__lowerCamelCase , __lowerCamelCase )
return pa_table
def lowerCAmelCase_ ( self : Dict , __lowerCamelCase : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_SCREAMING_SNAKE_CASE = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__lowerCamelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE = pd.read_csv(__lowerCamelCase , iterator=__lowerCamelCase , dtype=__lowerCamelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = pa.Table.from_pandas(__lowerCamelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise
| 418
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class lowercase_ ( A ):
"""simple docstring"""
lowerCamelCase_ = '''table-transformer'''
lowerCamelCase_ = ['''past_key_values''']
lowerCamelCase_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : int , __lowerCamelCase : int=True , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=3 , __lowerCamelCase : Union[str, Any]=1_0_0 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=2_0_4_8 , __lowerCamelCase : Any=8 , __lowerCamelCase : Any=6 , __lowerCamelCase : Any=2_0_4_8 , __lowerCamelCase : List[str]=8 , __lowerCamelCase : int=0.0 , __lowerCamelCase : int=0.0 , __lowerCamelCase : int=True , __lowerCamelCase : Union[str, Any]="relu" , __lowerCamelCase : Union[str, Any]=2_5_6 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : List[Any]=0.0_2 , __lowerCamelCase : List[str]=1.0 , __lowerCamelCase : int=False , __lowerCamelCase : int="sine" , __lowerCamelCase : Optional[Any]="resnet50" , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : Optional[int]=5 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Any=0.1 , **__lowerCamelCase : str , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE = backbone_config.get("model_type" )
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
_SCREAMING_SNAKE_CASE = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = None, None, None
_SCREAMING_SNAKE_CASE = use_timm_backbone
_SCREAMING_SNAKE_CASE = backbone_config
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = num_queries
_SCREAMING_SNAKE_CASE = d_model
_SCREAMING_SNAKE_CASE = encoder_ffn_dim
_SCREAMING_SNAKE_CASE = encoder_layers
_SCREAMING_SNAKE_CASE = encoder_attention_heads
_SCREAMING_SNAKE_CASE = decoder_ffn_dim
_SCREAMING_SNAKE_CASE = decoder_layers
_SCREAMING_SNAKE_CASE = decoder_attention_heads
_SCREAMING_SNAKE_CASE = dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = activation_dropout
_SCREAMING_SNAKE_CASE = activation_function
_SCREAMING_SNAKE_CASE = init_std
_SCREAMING_SNAKE_CASE = init_xavier_std
_SCREAMING_SNAKE_CASE = encoder_layerdrop
_SCREAMING_SNAKE_CASE = decoder_layerdrop
_SCREAMING_SNAKE_CASE = encoder_layers
_SCREAMING_SNAKE_CASE = auxiliary_loss
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = backbone
_SCREAMING_SNAKE_CASE = use_pretrained_backbone
_SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
_SCREAMING_SNAKE_CASE = class_cost
_SCREAMING_SNAKE_CASE = bbox_cost
_SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
_SCREAMING_SNAKE_CASE = mask_loss_coefficient
_SCREAMING_SNAKE_CASE = dice_loss_coefficient
_SCREAMING_SNAKE_CASE = bbox_loss_coefficient
_SCREAMING_SNAKE_CASE = giou_loss_coefficient
_SCREAMING_SNAKE_CASE = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
return self.d_model
class lowercase_ ( A ):
"""simple docstring"""
lowerCamelCase_ = version.parse('''1.11''' )
@property
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
return 1e-5
@property
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
return 1_2
| 418
| 1
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __magic_name__ ( unittest.TestCase ):
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: Tuple = '''hf-internal-testing/tiny-random-t5'''
_lowercase: str = AutoTokenizer.from_pretrained(A_ )
_lowercase: List[Any] = AutoModelForSeqaSeqLM.from_pretrained(A_ )
_lowercase: Dict = tokenizer('''This is me''' , return_tensors='''pt''' )
_lowercase: str = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_lowercase: Optional[Any] = model.generate(**A_ )
_lowercase: Any = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
_lowercase: List[Any] = AutoModelForSeqaSeqLM.from_pretrained(A_ )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_lowercase: List[Any] = model_reloaded.generate(**A_ )
self.assertTrue(torch.allclose(A_ , A_ ) )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase: List[Any] = '''hf-internal-testing/tiny-random-t5'''
_lowercase: str = AutoModelForSeqaSeqLM.from_pretrained(A_ )
_lowercase: Optional[int] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(A_ ):
model.save_pretrained(A_ )
_lowercase: List[str] = model.reverse_bettertransformer()
model.save_pretrained(A_ )
| 720
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
A__ : Dict = logging.get_logger(__name__)
A__ : int = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
UpperCamelCase_ = '''dpt'''
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1E-12 , A_=384 , A_=16 , A_=3 , A_=False , A_=True , A_=[2, 5, 8, 11] , A_="project" , A_=[4, 2, 1, 0.5] , A_=[96, 192, 384, 768] , A_=256 , A_=-1 , A_=False , A_=True , A_=0.4 , A_=255 , A_=0.1 , A_=[1, 1024, 24, 24] , A_=[0, 1] , A_=None , **A_ , ) -> int:
"""simple docstring"""
super().__init__(**A_ )
_lowercase: Union[str, Any] = hidden_size
_lowercase: str = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
_lowercase: Dict = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
_lowercase: List[str] = BitConfig(**A_ )
elif isinstance(A_ , A_ ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
_lowercase: Dict = BitConfig(**A_ )
elif isinstance(A_ , A_ ):
_lowercase: str = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
_lowercase: Dict = backbone_featmap_shape
_lowercase: Optional[int] = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
_lowercase: Any = None
_lowercase: str = None
_lowercase: Optional[Any] = []
_lowercase: Tuple = num_hidden_layers
_lowercase: Optional[int] = num_attention_heads
_lowercase: Any = intermediate_size
_lowercase: List[Any] = hidden_act
_lowercase: Union[str, Any] = hidden_dropout_prob
_lowercase: Tuple = attention_probs_dropout_prob
_lowercase: Dict = initializer_range
_lowercase: Dict = layer_norm_eps
_lowercase: Any = image_size
_lowercase: Optional[int] = patch_size
_lowercase: Optional[int] = num_channels
_lowercase: int = qkv_bias
_lowercase: List[str] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
_lowercase: Union[str, Any] = readout_type
_lowercase: Optional[Any] = reassemble_factors
_lowercase: List[str] = neck_hidden_sizes
_lowercase: Tuple = fusion_hidden_size
_lowercase: int = head_in_index
_lowercase: Optional[int] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_lowercase: int = use_auxiliary_head
_lowercase: Dict = auxiliary_loss_weight
_lowercase: List[Any] = semantic_loss_ignore_index
_lowercase: Dict = semantic_classifier_dropout
def lowercase_ ( self ) -> str:
"""simple docstring"""
_lowercase: Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_lowercase: List[str] = self.backbone_config.to_dict()
_lowercase: List[Any] = self.__class__.model_type
return output
| 272
| 0
|
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_lowercase = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_lowercase = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_lowercase = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_lowercase = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] ,reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] ,)
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple:
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any]=0.9 ,A_ : List[str]=3 ,A_ : Dict=0.5 ) -> Optional[Any]:
if NLTK_VERSION >= version.Version('3.6.5' ):
A = [
meteor_score.single_meteor_score(
word_tokenize(A_ ) ,word_tokenize(A_ ) ,alpha=A_ ,beta=A_ ,gamma=A_ )
for ref, pred in zip(A_ ,A_ )
]
else:
A = [
meteor_score.single_meteor_score(A_ ,A_ ,alpha=A_ ,beta=A_ ,gamma=A_ )
for ref, pred in zip(A_ ,A_ )
]
return {"meteor": np.mean(A_ )}
| 91
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''▁'''
__UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
__UpperCamelCase = {
'''google/pegasus-xsum''': 512,
}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Any = PegasusTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<pad>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<mask_2>" , lowerCAmelCase__="<mask_1>" , lowerCAmelCase__=None , lowerCAmelCase__=103 , **lowerCAmelCase__ , ) -> Dict:
SCREAMING_SNAKE_CASE = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError(
F'additional_special_tokens should be of type {type(lowerCAmelCase__ )}, but is'
F' {type(lowerCAmelCase__ )}' )
SCREAMING_SNAKE_CASE = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(lowerCAmelCase__ ) , self.offset - 1 )
]
if len(set(lowerCAmelCase__ ) ) != len(lowerCAmelCase__ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
SCREAMING_SNAKE_CASE = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset )]
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , mask_token_sent=lowerCAmelCase__ , offset=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def __A ( self , lowerCAmelCase__ ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase__ )
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
| 247
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 711
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowerCamelCase__ = random.Random()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=1.0 , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None ):
"""simple docstring"""
if rng is None:
__a = global_rng
__a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Dict , __lowercase : Tuple , __lowercase : Tuple=7 , __lowercase : Optional[int]=400 , __lowercase : int=2000 , __lowercase : List[Any]=2048 , __lowercase : List[str]=128 , __lowercase : Union[str, Any]=1 , __lowercase : str=512 , __lowercase : List[str]=30 , __lowercase : Tuple=44100 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a = spectrogram_length
__a = feature_size
__a = num_audio_channels
__a = hop_length
__a = chunk_length
__a = sampling_rate
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCamelCase_ ( self : Optional[int] , __lowercase : Optional[int]=False , __lowercase : Any=False ):
'''simple docstring'''
def _flatten(__lowercase : Tuple ):
return list(itertools.chain(*__lowercase ) )
if equal_length:
__a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(__lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Dict =TvltFeatureExtractor
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = TvltFeatureExtractionTester(self )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowercase , """spectrogram_length""" ) )
self.assertTrue(hasattr(__lowercase , """feature_size""" ) )
self.assertTrue(hasattr(__lowercase , """num_audio_channels""" ) )
self.assertTrue(hasattr(__lowercase , """hop_length""" ) )
self.assertTrue(hasattr(__lowercase , """chunk_length""" ) )
self.assertTrue(hasattr(__lowercase , """sampling_rate""" ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = feat_extract_first.save_pretrained(__lowercase )[0]
check_json_file_has_correct_format(__lowercase )
__a = self.feature_extraction_class.from_pretrained(__lowercase )
__a = feat_extract_first.to_dict()
__a = feat_extract_second.to_dict()
__a = dict_first.pop("""mel_filters""" )
__a = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(__lowercase , """feat_extract.json""" )
feat_extract_first.to_json_file(__lowercase )
__a = self.feature_extraction_class.from_json_file(__lowercase )
__a = feat_extract_first.to_dict()
__a = feat_extract_second.to_dict()
__a = dict_first.pop("""mel_filters""" )
__a = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
# Initialize feature_extractor
__a = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a = [np.asarray(__lowercase ) for speech_input in speech_inputs]
# Test not batched input
__a = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a = feature_extractor(__lowercase , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a = feature_extractor(
__lowercase , return_tensors="""np""" , sampling_rate=44100 , mask_audio=__lowercase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a = np.asarray(__lowercase )
__a = feature_extractor(__lowercase , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ):
'''simple docstring'''
__a = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__a = ds.sort("""id""" ).select(range(__lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self._load_datasamples(1 )
__a = TvltFeatureExtractor()
__a = feature_extractor(__lowercase , return_tensors="""pt""" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
__a = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __lowercase , atol=1E-4 ) )
| 547
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.