| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |

# Circular singly linked list: the tail node's `next` pointer wraps back to
# the head, so iteration must stop once it returns to the starting node.
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None  # first node
        self.tail = None  # last node; tail.next points back to head

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # completed one full loop
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_circular_linked_list()

from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty (not necessarily contiguous) subsequences.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)  # keep, extend, or restart at num
    return ans
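

# Note: max_subsequence_sum above maximises over *any* non-empty subsequence,
# so elements may be skipped (for an all-positive input it simply sums
# everything; for an all-negative input it returns the largest element). The
# classic Kadane recurrence for the best *contiguous* run is slightly
# different; a sketch for comparison (not part of the original file):
def max_subarray_sum(nums: Sequence[int]) -> int:
    """Maximum sum over contiguous subarrays.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not nums:
        raise ValueError("Input sequence should not be empty")
    best = current = nums[0]
    for num in nums[1:]:
        current = max(num, current + num)  # extend the current run or restart
        best = max(best, current)
    return best
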
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))

# Slow integration test for the Flax XLM-RoBERTa model: checks the shape and a
# slice of the last hidden state against reference values.
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

# Registry of hyperparameter-search backends for the Trainer: each backend
# bundles an availability check, the search runner, and a default search space.
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
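

# Hedged usage sketch (not part of this module): the registry above is
# normally consumed through `Trainer.hyperparameter_search`, which falls back
# to `default_hp_search_backend()` when no backend is given. `model_init` is a
# placeholder factory supplied by the caller:
#
#     trainer = Trainer(model_init=model_init, args=training_args, ...)
#     best_run = trainer.hyperparameter_search(
#         direction="minimize", backend="optuna", n_trials=10
#     )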

# Conversion script: ports an original Donut checkpoint (Swin encoder + MBart
# decoder) to the HuggingFace VisionEncoderDecoderModel format and verifies
# the outputs on a sample scanned document.
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
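
# Example invocation, based on the argparse flags defined above (the script
# file name is an assumption of this sketch):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa \
#       --push_to_hub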
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger(__name__)
def a_ ( __a ):
A__ = DPTConfig()
if "large" in checkpoint_url:
A__ = 1024
A__ = 4096
A__ = 24
A__ = 16
A__ = [5, 11, 17, 23]
A__ = [256, 512, 1024, 1024]
A__ = (1, 384, 384)
if "ade" in checkpoint_url:
A__ = True
A__ = 150
A__ = '''huggingface/label-files'''
A__ = '''ade20k-id2label.json'''
A__ = json.load(open(cached_download(hf_hub_url(__a , __a , repo_type='''dataset''' ) ) , '''r''' ) )
A__ = {int(__a ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = [1, 150, 480, 480]
return config, expected_shape
def a_ ( __a ):
A__ = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(__a , __a )
def a_ ( __a ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
A__ = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
A__ = name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
A__ = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
A__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
A__ = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
A__ = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
A__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
A__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A__ = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
A__ = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
A__ = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
A__ = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
A__ = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
A__ = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
A__ = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
A__ = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
A__ = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
A__ = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
A__ = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
A__ = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
A__ = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
A__ = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
A__ = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
A__ = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
A__ = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
A__ = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
A__ = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
A__ = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
A__ = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
A__ = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
A__ = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
A__ = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
def a_ ( __a , __a ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
def a_ ( ):
A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def a_ ( __a , __a , __a , __a ):
A__ , A__ = get_dpt_config(__a )
# load original state_dict from URL
A__ = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(__a )
# rename keys
for key in state_dict.copy().keys():
A__ = state_dict.pop(__a )
A__ = val
# read in qkv matrices
read_in_q_k_v(__a , __a )
# load HuggingFace model
A__ = DPTForSemanticSegmentation(__a ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__a )
model.load_state_dict(__a )
model.eval()
# Check outputs on an image
A__ = 480 if '''ade''' in checkpoint_url else 384
A__ = DPTImageProcessor(size=__a )
A__ = prepare_img()
A__ = image_processor(__a , return_tensors='''pt''' )
# forward pass
A__ = model(**__a ).logits if '''ade''' in checkpoint_url else model(**__a ).predicted_depth
# Assert logits
A__ = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
if "ade" in checkpoint_url:
A__ = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
assert outputs.shape == torch.Size(__a )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __a , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __a )
)
Path(__a ).mkdir(exist_ok=__a )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__a )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__a , )
image_processor.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__a , )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
__snake_case : Dict = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
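
# Example invocation, based on the argparse flags defined above (the script
# file name is an assumption of this sketch):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large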

from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if item is in the sorted list a_list.

    >>> binary_search([2, 4, 6], 4)
    True
    >>> binary_search([2, 4, 6], 5)
    False
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
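

# The recursive version above slices the list on every call, so each step
# copies up to half the list and the overall cost is O(n) time and extra
# space even though only O(log n) comparisons are made. For comparison, a
# sketch of the usual two-index iterative form (not part of the original
# file):
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    """
    >>> binary_search_iterative([2, 4, 6], 4)
    True
    >>> binary_search_iterative([2, 4, 6], 5)
    False
    """
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False
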
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")

# Lazy-import init for the TrOCR model family: torch-dependent symbols are
# only registered when torch is available, and all imports are deferred via
# _LazyModule at runtime.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
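
# How the pattern above behaves in practice (a hedged sketch, not part of the
# module): importing a symbol does not pull in torch until the attribute is
# actually resolved by _LazyModule. The checkpoint name below is a real hub id
# used purely as an example.
#
#     from transformers import TrOCRProcessor
#     processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")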
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case = random.Random()
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=1.0 , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Optional[Any]:
if rng is None:
_snake_case = global_rng
_snake_case = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : int=7 , __lowerCamelCase : List[Any]=4_0_0 , __lowerCamelCase : Optional[int]=2_0_0_0 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : List[Any]=1_6_0_0_0 , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[int]=True , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = min_seq_length
_snake_case = max_seq_length
_snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_snake_case = feature_size
_snake_case = padding_value
_snake_case = sampling_rate
_snake_case = return_attention_mask
_snake_case = do_normalize
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=False ):
"""simple docstring"""
def _flatten(__lowerCamelCase : Any ):
return list(itertools.chain(*__lowerCamelCase ) )
if equal_length:
_snake_case = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_snake_case = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case = [np.asarray(__lowerCamelCase ) for x in speech_inputs]
return speech_inputs
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Tuple = WavaVecaFeatureExtractor
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = WavaVecaFeatureExtractionTester(self )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(__lowerCamelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCamelCase , axis=0 ) - 1 ) < 1E-3 ) )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
# Tests that all call wrap to encode_plus and batch_encode_plus
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
_snake_case = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
_snake_case = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
# Test batched
_snake_case = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
_snake_case = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_snake_case = np.asarray(__lowerCamelCase )
_snake_case = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
_snake_case = feat_extract(__lowerCamelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = ['''longest''', '''max_length''', '''do_not_pad''']
_snake_case = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
_snake_case = feat_extract(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors='''np''' )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = range(8_0_0 , 1_4_0_0 , 2_0_0 )
_snake_case = [floats_list((1, x) )[0] for x in lengths]
_snake_case = ['''longest''', '''max_length''', '''do_not_pad''']
_snake_case = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
_snake_case = feat_extract(__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
_snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_snake_case = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
_snake_case = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
import torch
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = np.random.rand(1_0_0 ).astype(np.floataa )
_snake_case = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_snake_case = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_snake_case = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_snake_case = WavaVecaConfig.from_pretrained(__lowerCamelCase )
_snake_case = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
"""simple docstring"""
from copy import deepcopy
class UpperCAmelCase :
def __init__( self : Optional[Any] , __lowerCamelCase : list[int] | None = None , __lowerCamelCase : int | None = None ):
"""simple docstring"""
if arr is None and size is not None:
_snake_case = size
_snake_case = [0] * size
elif arr is not None:
self.init(__lowerCamelCase )
else:
raise ValueError('''Either arr or size must be specified''' )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : list[int] ):
"""simple docstring"""
_snake_case = len(__lowerCamelCase )
_snake_case = deepcopy(__lowerCamelCase )
for i in range(1 , self.size ):
_snake_case = self.next_(__lowerCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
_snake_case = self.next_(__lowerCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index - (index & (-index))
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
_snake_case = self.next_(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
self.add(__lowerCamelCase , value - self.get(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
if right == 0:
return 0
_snake_case = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
_snake_case = self.prev(__lowerCamelCase )
return result
def __UpperCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
return self.prefix(__lowerCamelCase ) - self.prefix(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int ):
"""simple docstring"""
return self.query(__lowerCamelCase , index + 1 )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : int ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
_snake_case = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
_snake_case = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
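

def _fenwick_tree_demo() -> None:
    """Usage sketch added for illustration; not part of the original module.

    >>> f = FenwickTree([1, 2, 3, 4, 5])
    >>> f.prefix(3)  # 1 + 2 + 3
    6
    >>> f.query(1, 4)  # 2 + 3 + 4
    9
    >>> f.add(2, 10)  # arr becomes [1, 2, 13, 4, 5]
    >>> f.query(1, 4)
    19
    >>> f.get_array()
    [1, 2, 13, 4, 5]
    """
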
if __name__ == "__main__":
    import doctest

    doctest.testmod()

# Tests for the UMT5 model (PyTorch): a small random-config tester plus slow
# integration checks against google/umt5-small.
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A_ ( unittest.TestCase ):
    def lowercase ( self : List[str] ):
        x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
        torch_builtin = get_activation("gelu" )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def lowercase ( self : int ):
        x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
        torch_builtin = get_activation("gelu" )
        gelu_10 = get_activation("gelu_10" )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu_10(x )
        clipped_mask = torch.where(y_gelu_10 < 1_0.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 1_0.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
def lowercase ( self : Any ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
        with self.assertRaises(KeyError ):
            get_activation("bogus" )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def lowercase ( self : Dict ):
        act1 = get_activation("gelu" )
        act1.a = 1
        act2 = get_activation("gelu" )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
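# For reference, the two GELU variants compared above follow the standard formulas:
# gelu_python is the exact erf form, while gelu_new is the GPT-2-style tanh
# approximation. A minimal illustrative implementation of both:
import math
import torch

def gelu_exact(x: torch.Tensor) -> torch.Tensor:
    # x * Phi(x), with Phi the standard normal CDF
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_tanh_approx(x: torch.Tensor) -> torch.Tensor:
    # tanh approximation; close to, but not exactly equal to, the erf form
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

_x = torch.linspace(-3, 3, 7)
# The small residual printed here is why the tests assert allclose against the
# builtin but *not* between the exact and approximate variants.
print(torch.max(torch.abs(gelu_exact(_x) - gelu_tanh_approx(_x))))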
| 119
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
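# The init above defers heavy imports through a lazy module so that importing the
# package does not pull in torch. A minimal sketch of the same idea with a
# module-level __getattr__ (PEP 562), meant for a package __init__.py; transformers'
# _LazyModule is more featureful, so treat this as an illustration only:
import importlib

_lazy_imports = {"modeling_mra": ["MraModel", "MraPreTrainedModel"]}  # hypothetical layout

def __getattr__(name):
    for submodule, attributes in _lazy_imports.items():
        if name in attributes:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")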
| 392
|
import tensorflow as tf
from ...tf_utils import shape_list
class snake_case ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=1 , lowerCamelCase_ : Tuple=False , **lowerCamelCase_ : Dict ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = d_embed
UpperCAmelCase__ = d_proj
UpperCAmelCase__ = cutoffs + [vocab_size]
UpperCAmelCase__ = [0] + self.cutoffs
UpperCAmelCase__ = div_val
UpperCAmelCase__ = self.cutoffs[0]
UpperCAmelCase__ = len(self.cutoffs ) - 1
UpperCAmelCase__ = self.shortlist_size + self.n_clusters
UpperCAmelCase__ = keep_order
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def UpperCAmelCase ( self : Dict , lowerCamelCase_ : Union[str, Any] ) ->Any:
'''simple docstring'''
if self.n_clusters > 0:
UpperCAmelCase__ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=lowerCamelCase_ , name="""cluster_weight""" )
UpperCAmelCase__ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=lowerCamelCase_ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
UpperCAmelCase__ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=lowerCamelCase_ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(lowerCamelCase_ )
else:
self.out_projs.append(lowerCamelCase_ )
UpperCAmelCase__ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=lowerCamelCase_ , name=f'''out_layers_._{i}_._weight''' , )
UpperCAmelCase__ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=lowerCamelCase_ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
UpperCAmelCase__ , UpperCAmelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ = self.d_embed // (self.div_val**i)
UpperCAmelCase__ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=lowerCamelCase_ , name=f'''out_projs_._{i}''' )
self.out_projs.append(lowerCamelCase_ )
UpperCAmelCase__ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=lowerCamelCase_ , name=f'''out_layers_._{i}_._weight''' , )
UpperCAmelCase__ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=lowerCamelCase_ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(lowerCamelCase_ )
    @staticmethod
    def _logit ( x : Tuple , W : str , b : Union[str, Any] , proj : Optional[Any]=None ) ->Any:
        '''simple docstring'''
        y = x
        if proj is not None:
            y = tf.einsum("""ibd,ed->ibe""" , y , proj )
        return tf.einsum("""ibd,nd->ibn""" , y , W ) + b
    @staticmethod
    def _gather_logprob ( logprob : str , target : Optional[int] ) ->Any:
        '''simple docstring'''
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
def UpperCAmelCase ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Dict=False ) ->Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = 0
if self.n_clusters == 0:
UpperCAmelCase__ = self._logit(lowerCamelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
UpperCAmelCase__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCamelCase_ , logits=lowerCamelCase_ )
UpperCAmelCase__ = tf.nn.log_softmax(lowerCamelCase_ , axis=-1 )
else:
UpperCAmelCase__ = shape_list(lowerCamelCase_ )
UpperCAmelCase__ = []
UpperCAmelCase__ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
UpperCAmelCase__ , UpperCAmelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
UpperCAmelCase__ = (target >= l_idx) & (target < r_idx)
UpperCAmelCase__ = tf.where(lowerCamelCase_ )
UpperCAmelCase__ = tf.boolean_mask(lowerCamelCase_ , lowerCamelCase_ ) - l_idx
if self.div_val == 1:
UpperCAmelCase__ = self.out_layers[0][0][l_idx:r_idx]
UpperCAmelCase__ = self.out_layers[0][1][l_idx:r_idx]
else:
UpperCAmelCase__ = self.out_layers[i][0]
UpperCAmelCase__ = self.out_layers[i][1]
if i == 0:
UpperCAmelCase__ = tf.concat([cur_W, self.cluster_weight] , 0 )
UpperCAmelCase__ = tf.concat([cur_b, self.cluster_bias] , 0 )
UpperCAmelCase__ = self._logit(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.out_projs[0] )
UpperCAmelCase__ = tf.nn.log_softmax(lowerCamelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
UpperCAmelCase__ = tf.boolean_mask(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase__ = self._gather_logprob(lowerCamelCase_ , lowerCamelCase_ )
else:
UpperCAmelCase__ = self._logit(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.out_projs[i] )
UpperCAmelCase__ = tf.nn.log_softmax(lowerCamelCase_ )
UpperCAmelCase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
UpperCAmelCase__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCamelCase_ )
if target is not None:
UpperCAmelCase__ = tf.boolean_mask(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase__ = tf.boolean_mask(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase__ = self._gather_logprob(lowerCamelCase_ , lowerCamelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCamelCase_ , -cur_logprob , shape_list(lowerCamelCase_ ) )
UpperCAmelCase__ = tf.concat(lowerCamelCase_ , axis=-1 )
if target is not None:
if return_mean:
UpperCAmelCase__ = tf.reduce_mean(lowerCamelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCamelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(lowerCamelCase_ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
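# The layer above is an adaptive softmax: frequent "head" tokens are scored directly,
# while a rare token's probability factorizes as p(cluster) * p(token | cluster).
# A tiny numpy sketch of how the two log-probabilities combine (illustrative only):
import numpy as np

head_logprob = np.log([0.7, 0.2, 0.1])  # two head tokens plus one cluster slot
tail_logprob = np.log([0.5, 0.3, 0.2])  # tokens inside the cluster
tail_full = head_logprob[2] + tail_logprob  # log p(cluster) + log p(token | cluster)
full = np.concatenate([head_logprob[:2], tail_full])
assert np.isclose(np.exp(full).sum(), 1.0)  # still a proper distribution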
| 392
| 1
|
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the number of digits of the smallest repunit divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor n, coprime to 10, for which A(n) first exceeds limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"{solution() = }")
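# Sanity checks for the helper (illustrative): the smallest repunit divisible by 7 is
# R(6) = 111111 = 7 * 15873, and 11111 = 41 * 271, so A(7) = 6 and A(41) = 5.
assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(41) == 5
assert least_divisible_repunit(2) == 0  # divisors sharing a factor with 10 never divide a repunit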
| 715
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
    def __init__( self : int , features=None , device=None , **jnp_array_kwargs ) ->Tuple:
        '''simple docstring'''
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
                """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""" )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f"""Device with string identifier {self.device} not listed among the available """
                f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
                f"""device: {str(jax.devices()[0] )}.""" )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
        '''simple docstring'''
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate ( self : Dict , column : Any ) ->Union[str, Any]:
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize ( self : List[str] , value : Any ) ->Optional[int]:
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"""dtype""": jnp.int64}
            else:
                default_dtype = {"""dtype""": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"""dtype""": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize ( self : Union[str, Any] , data_struct : List[str] ) ->Any:
        '''simple docstring'''
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , """__array__""" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize ( self : List[str] , data_struct : dict ) ->int:
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row ( self : Dict , pa_table : pa.Table ) ->Mapping:
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column ( self : Optional[int] , pa_table : pa.Table ) ->"jax.Array":
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch ( self : Optional[Any] , pa_table : pa.Table ) ->Mapping:
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
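# The formatter above pins array creation to a device via jax.default_device. A
# standalone sketch of that pattern (requires jax; the helper name is illustrative):
import jax
import jax.numpy as jnp
import numpy as np

def to_device_array(value: np.ndarray, device_id: str):
    devices = {str(d): d for d in jax.devices()}
    with jax.default_device(devices[device_id]):
        # jnp.array copies np.ndarray data, so the result lives on the chosen device
        return jnp.array(value)

print(to_device_array(np.arange(4), str(jax.devices()[0])))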
| 19
| 0
|
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # sieve the first segment [2, sqrt(n)] to collect the base primes
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    # sieve the remaining range in blocks of size sqrt(n)
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each  # smallest multiple of `each` that is >= low
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
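# Quick validation of the segmented sieve against a direct sieve for a small bound
# (an illustrative check, not part of the original script):
def simple_sieve(n: int) -> list[int]:
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(math.sqrt(n)) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

assert sieve(1_000) == simple_sieve(1_000)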
| 340
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
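# Worked examples (illustrative):
assert median_of_two_arrays([1, 3], [2]) == 2        # odd total count: middle element
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5   # even total count: mean of the middle pair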
| 617
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1_024}
class A__( __magic_name__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
    def __init__( self : int , vocab_file : Any , monolingual_vocab_file : Dict , bos_token : Tuple="<s>" , eos_token : Optional[Any]="</s>" , sep_token : Optional[Any]="</s>" , cls_token : Optional[int]="<s>" , unk_token : Tuple="<unk>" , pad_token : str="<pad>" , mask_token : Any="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : List[str] , ) -> None:
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self : str ) -> Union[str, Any]:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : Optional[int] , d : Tuple ) -> List[Any]:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens ( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences ( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size ( self : Tuple ) -> int:
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens )
    def get_vocab ( self : List[str] ) -> List[Any]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize ( self : Union[str, Any] , text : str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id ( self : Optional[int] , token : List[Any] ) -> Dict:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token ( self : Union[str, Any] , index : Any ) -> Any:
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string ( self : Union[str, Any] , tokens : List[str] ) -> str:
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary ( self : str , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"""{str(token )} \n""" )
        return out_vocab_file, out_monolingual_vocab_file
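# BartPho keeps a large multilingual SentencePiece model but scores only a reduced
# Vietnamese vocabulary, re-indexed through a fairseq-style dict as in __init__ above.
# A toy sketch of that two-level mapping (the file contents here are invented):
monolingual = ["▁xin", "▁chào"]  # e.g. the first column of dict.txt

fairseq_tokens_to_ids = {}
for token in ["<s>", "<pad>", "</s>", "<unk>"]:  # specials first, in a fixed order
    fairseq_tokens_to_ids[token] = len(fairseq_tokens_to_ids)
for token in monolingual:  # then the reduced vocabulary
    fairseq_tokens_to_ids[token] = len(fairseq_tokens_to_ids)

unk_id = fairseq_tokens_to_ids["<unk>"]
# a SentencePiece piece outside the reduced vocab falls back to <unk>:
assert fairseq_tokens_to_ids.get("▁bonjour", unk_id) == unk_id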
| 690
|
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class A__( __magic_name__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
    def __init__( self : int , vocab_file : int , do_lower_case : str=False , remove_space : str=False , keep_accents : Optional[int]=False , pad_token : Union[str, Any]=None , unk_token : List[Any]=None , eos_token : Any=None , bos_token : List[Any]=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Dict , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' if you are testing the model, this can safely be ignored''' )
            name_or_path = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
        unk_token = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '''<pad>''' if pad_token is None else pad_token
            bos_token = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]""" )
    def __getstate__( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self : int , d : Optional[int] ) -> int:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size ( self : Optional[Any] ) -> int:
        """simple docstring"""
        return len(self.sp_model )
    def preprocess_text ( self : List[str] , text : str ) -> str:
        """simple docstring"""
        text = self.non_printing_characters_re.sub('''''' , text )
        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''' , text )
        return text
    def _tokenize ( self : Union[str, Any] , text : str , **kwargs : Optional[int] ) -> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id ( self : Tuple , token : str ) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token ( self : List[str] , index : int ) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization ( out_string : str ) -> str:
        """simple docstring"""
        return out_string
    def convert_tokens_to_string ( self : Union[str, Any] , tokens : List[str] ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab ( self : Union[str, Any] ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary ( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast ( self : Optional[Any] , text : Union[str, List[str]] , return_tensors : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast ( self : Any , token_ids : Union[int, List[int]] ) -> str:
        """simple docstring"""
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids ( self : List[str] , conversation : "Conversation" ) -> List[int]:
        """simple docstring"""
        all_responses = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(all_responses ) + f"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt )
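# The conversation helper above builds prompts of the shape
# "<|endoftext|><s>User: ...<s>Bot: ...<s>Bot:" before encoding. A plain-string
# sketch of that formatting (token strings taken from the class defaults above):
def build_chat_prompt(turns, eos="<|endoftext|>", bos="<s>"):
    lines = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in turns]
    return f"{eos}{bos}" + bos.join(lines) + f"{bos}Bot:"

print(build_chat_prompt([(True, "Hej!"), (False, "Hej! Hur kan jag hjälpa dig?"), (True, "Vad är 2+2?")]))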
| 690
| 1
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase_ :
def __init__( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=3_3 , UpperCAmelCase__ : Optional[Any]=3_2 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : str=5_1_2 , UpperCAmelCase__ : List[Any]=1_6 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Union[str, Any]=None , ) -> List[str]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ) -> List[Any]:
lowerCAmelCase = EsmModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
lowerCAmelCase = EsmForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ) -> Union[str, Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = EsmForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : Tuple = False
lowerCamelCase : Optional[Any] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : List[str] = ()
lowerCamelCase : Optional[Any] = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = True
def __UpperCAmelCase ( self : Any ) -> List[Any]:
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=3_7 )
def __UpperCAmelCase ( self : List[str] ) -> Dict:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : int ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : int ) -> Tuple:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = EsmModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()[0]
lowerCAmelCase = EsmEmbeddings(config=UpperCAmelCase__ )
lowerCAmelCase = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
lowerCAmelCase = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCAmelCase = create_position_ids_from_input_ids(UpperCAmelCase__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
def __UpperCAmelCase ( self : Dict ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()[0]
lowerCAmelCase = EsmEmbeddings(config=UpperCAmelCase__ )
lowerCAmelCase = torch.empty(2 , 4 , 3_0 )
lowerCAmelCase = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCAmelCase = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCAmelCase = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def __UpperCAmelCase ( self : Tuple ) -> Any:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch
class UpperCAmelCase_ ( __lowercase ):
@slow
def __UpperCAmelCase ( self : List[Any] ) -> int:
with torch.no_grad():
lowerCAmelCase = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(UpperCAmelCase__ )[0]
lowerCAmelCase = 3_3
lowerCAmelCase = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
with torch.no_grad():
lowerCAmelCase = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCAmelCase = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
lowerCAmelCase = model(UpperCAmelCase__ )[0]
# compare the actual values for a slice.
lowerCAmelCase = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
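# The embedding test above checks create_position_ids_from_input_ids. The standard
# RoBERTa/ESM recipe counts only non-padding tokens and starts numbering right after
# padding_idx. A minimal reference implementation (illustrative):
import torch

def position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask  # padding positions stay 0
    return incremental_indices.long() + padding_idx         # so padding maps to padding_idx

_ids = torch.as_tensor([[12, 31, 13, 1]])  # padding_idx = 1
assert position_ids_from_input_ids(_ids, padding_idx=1).tolist() == [[2, 3, 4, 1]]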
| 133
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : List[str] = RoCBertTokenizer
lowerCamelCase : str = None
lowerCamelCase : Dict = False
lowerCamelCase : Dict = True
lowerCamelCase : Any = filter_non_english
    def setUp ( self : Optional[int] ) -> str:
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
def __UpperCAmelCase ( self : List[Any] ) -> int:
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        tokens = tokenizer.tokenize('你好[SEP]你是谁' )
        self.assertListEqual(tokens , ['你', '好', '[SEP]', '你', '是', '谁'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
def __UpperCAmelCase ( self : int ) -> str:
lowerCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __UpperCAmelCase ( self : Dict ) -> int:
lowerCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
lowerCAmelCase = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCAmelCase = tokenizer_r.encode_plus(
UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , )
lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , 'do_lower_case' ) else False
lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
lowerCAmelCase = ['的', '人', '有']
lowerCAmelCase = ''.join(UpperCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = False
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ )
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase = tokenizer.encode('你好' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode('你是谁' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase = '你好,你是谁'
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.prepare_for_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
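# The wordpiece tests above rely on greedy longest-match-first tokenization with a
# "##" continuation prefix. A compact reference sketch of that algorithm (illustrative):
def wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # any unmatchable span makes the whole word [UNK]
        tokens.append(cur)
        start = end
    return tokens

_vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece("unwanted", _vocab) == ["un", "##want", "##ed"]
assert wordpiece("unwantedX", _vocab) == ["[UNK]"]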
| 133
| 1
|
import os
def A ( a_ = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(a_ ) ,a_ ) ) as in_file:
__UpperCamelCase : str =in_file.read()
__UpperCamelCase : List[Any] =[[int(a_ ) for cell in row.split(',' )] for row in data.strip().splitlines()]
__UpperCamelCase : Dict =[[0 for cell in row] for row in grid]
__UpperCamelCase : str =len(grid[0] )
__UpperCamelCase : Optional[int] =[[0 for i in range(a_ )] for j in range(a_ )]
__UpperCamelCase : Any =grid[0][0]
for i in range(1 ,a_ ):
__UpperCamelCase : Any =grid[0][i] + dp[0][i - 1]
for i in range(1 ,a_ ):
__UpperCamelCase : Optional[Any] =grid[i][0] + dp[i - 1][0]
for i in range(1 ,a_ ):
for j in range(1 ,a_ ):
__UpperCamelCase : Optional[Any] =grid[i][j] + min(dp[i - 1][j] ,dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f"{solution() = }")
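# Minimal sanity check of the same DP recurrence on a hand-written 3x3 grid
# (hypothetical data, not taken from matrix.txt): the cheapest right/down path
# through [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is 1 -> 3 -> 1 -> 1 -> 1 = 7.
def min_path_sum(grid: list[list[int]]) -> int:
    dp = [row[:] for row in grid]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            candidates = []
            if i > 0:
                candidates.append(dp[i - 1][j])
            if j > 0:
                candidates.append(dp[i][j - 1])
            if candidates:
                dp[i][j] += min(candidates)
    return dp[-1][-1]


assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7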
| 709
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
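# Hypothetical usage sketch (the two-class ClassLabel below is made-up data):
# aligning with a dataset's features deep-copies the template and swaps the
# dataset's own ClassLabel into the generic "labels" schema.
features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
aligned = ImageClassification().align_with_features(features)
assert aligned.label_schema["labels"].names == ["cat", "dog"]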
| 154
| 0
|
def or_gate(input_1: int, input_2: int) -> int:
    """Return the logical OR of two binary inputs."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
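# The tuple-count trick above is equivalent to coercing Python's boolean `or`
# to an int; a small illustrative cross-check (not part of the original file):
for a in (0, 1):
    for b in (0, 1):
        assert or_gate(a, b) == int(bool(a) or bool(b))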
| 186
|
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 186
| 1
|
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return the given decimal as a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm for the greatest common divisor
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
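# Cross-check against the standard library (illustrative only): fractions.Fraction
# performs the same lowest-terms reduction for decimal strings.
from fractions import Fraction

frac = Fraction("6.25")
assert decimal_to_fraction("6.25") == (frac.numerator, frac.denominator)  # (25, 4)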
| 714
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Tuple =None
if self.use_input_lengths:
A__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Optional[Any] =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any =None
A__ : Tuple =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float()
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
        result = model(lowerCAmelCase_ )
        result_with_labels = model(
            lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
        result_with_labels = model(
            lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__snake_case = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int:
'''simple docstring'''
A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Dict =XLMModelTester(self )
A__ : List[str] =ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : Tuple =min_length + idx + 1
A__ : Tuple =min_length + idx + 1
A__ : Dict =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : str =min_length + idx + 1
A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
| 687
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
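# A minimal standalone sketch of the optional-dependency pattern used above
# (illustrative only; `backend_available` and the export names are assumptions,
# not part of transformers): probe for a backend and only register its symbols
# when the package is importable.
import importlib.util


def backend_available(package_name: str) -> bool:
    # find_spec returns None when the package cannot be imported
    return importlib.util.find_spec(package_name) is not None


available_exports = {}
if backend_available("torch"):
    available_exports["torch_backend"] = ["TorchThing"]
if backend_available("tensorflow"):
    available_exports["tf_backend"] = ["TFThing"]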
| 439
|
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
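# Quick cross-check for a small limit using a recursive chain-length helper;
# functools' cache plays the role of the `counters` dict above (illustrative only).
from functools import lru_cache


@lru_cache(maxsize=None)
def chain_length(n: int) -> int:
    if n == 1:
        return 1
    return 1 + chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)


assert solution(10) == max(range(2, 10), key=chain_length)  # both pick 9 (chain length 20)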
| 99
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : int ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : Optional[int] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : int ):
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCamelCase , repo_id="test-feature-extractor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : str ):
CustomFeatureExtractor.register_for_auto_class()
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 129
|
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    # intentionally shadows the builtin to mirror dataset.map; get_duration
    # (from the repo's local utils) makes the call return its wall-clock time
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
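# `get_duration` comes from the repo's local utils; a minimal stand-in with the
# same contract (a decorator that returns the wall-clock time of one call) could
# look like the sketch below (an assumption for illustration, not the real helper):
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start

    return wrapper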
| 129
| 1
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
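# A rough sketch of the happy-path parsing the type assertions above imply
# (an assumption for illustration; accelerate's real `_convert_nargs_to_dict`
# differs and, notably, rejects the ambiguous bare flags in the failing args,
# hence the pytest.raises check).
def convert_nargs_to_dict_sketch(nargs: list) -> dict:
    result = {}
    i = 0
    while i < len(nargs):
        key = nargs[i].lstrip("-")
        if i + 1 >= len(nargs) or nargs[i + 1].startswith("--"):
            # a flag with no value is treated as a boolean switch
            result[key] = True
            i += 1
        else:
            raw = nargs[i + 1]
            if raw in ("True", "False"):
                result[key] = raw == "True"
            else:
                try:
                    result[key] = int(raw)
                except ValueError:
                    try:
                        result[key] = float(raw)
                    except ValueError:
                        result[key] = raw
            i += 2
    return result


# e.g. ["--do_train", "False", "--epochs", "3", "--max_steps", "50.5"]
# -> {"do_train": False, "epochs": 3, "max_steps": 50.5}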
| 30
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_a )
class __a:
"""simple docstring"""
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(_a )
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
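# The overlap check in `_get_best_spans` above keeps a span only if it neither
# contains nor is contained in a previously chosen span; a standalone sketch of
# that selection rule (illustrative, with made-up scores):
def pick_non_overlapping(spans_with_scores, top_spans):
    # spans_with_scores: [((start, end), score)] sorted by descending score
    chosen = []
    for (start, end), _score in spans_with_scores:
        if any(start <= s <= e <= end or s <= start <= end <= e for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


assert pick_non_overlapping([((0, 5), 9.0), ((1, 3), 8.0), ((6, 8), 7.0)], top_spans=2) == [(0, 5), (6, 8)]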
| 30
| 1
|
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__UpperCAmelCase = logging.getLogger(__name__)
__UpperCAmelCase = 'pytorch_model.bin'
@dataclasses.dataclass
class _a :
"""simple docstring"""
A = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
A = dataclasses.field(
default=_A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class _a :
"""simple docstring"""
A = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
A = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
A = dataclasses.field(
default=_A , metadata={'help': 'A csv or a json file containing the validation data.'} )
A = dataclasses.field(
default=_A , metadata={'help': 'The name of the task to train on.'} , )
A = dataclasses.field(
default=_A , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class _a :
"""simple docstring"""
A = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
A = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
A = dataclasses.field(
default='no' , metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
} , )
A = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
A = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
A = dataclasses.field(
default=_A , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
A = dataclasses.field(
default=_A , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
A = dataclasses.field(
default=_A , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
A = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
A = dataclasses.field(
default=1_00 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
A = dataclasses.field(
default=_A , metadata={'help': 'Random seed for initialization.'} , )
def SCREAMING_SNAKE_CASE_ ( snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : str , snake_case_ : int ) -> str:
SCREAMING_SNAKE_CASE : int = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
SCREAMING_SNAKE_CASE : List[str] = dataset.filter(lambda snake_case_ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
SCREAMING_SNAKE_CASE : str = int(eval_result * len(A_ ) )
print(A_ )
SCREAMING_SNAKE_CASE : str = dataset.sort('probability' , reverse=A_ )
SCREAMING_SNAKE_CASE : Dict = dataset.select(range(A_ ) )
SCREAMING_SNAKE_CASE : Any = dataset.remove_columns(['label', 'probability'] )
SCREAMING_SNAKE_CASE : List[str] = dataset.rename_column('prediction' , 'label' )
SCREAMING_SNAKE_CASE : str = dataset.map(lambda snake_case_ : {"label": idalabel[example["label"]]} )
SCREAMING_SNAKE_CASE : List[str] = dataset.shuffle(seed=args.seed )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(A_ , f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(A_ , index=A_ )
else:
dataset.to_json(A_ )
def SCREAMING_SNAKE_CASE_ ( snake_case_ : int , snake_case_ : str , snake_case_ : int , snake_case_ : List[str] , **snake_case_ : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # (model_name_or_path, train_file, infer_file and output_dir are the enclosing
    # function's arguments; MODEL_BIN_FILE is assumed to be defined earlier in this script.)
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files['train'] = args.train_file
    data_files['infer'] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files['eval'] = args.eval_file

    for key in data_files:
        extension = data_files[key].split('.')[-1]
        assert extension in ['csv', 'json'], f'`{key}_file` should be a csv or a json file.'
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f'`{key}_file` should be a {args.data_file_extension} file.'

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info('Creating the initial data directory for self-training...')
    data_dir_format = f'{args.output_dir}/self-train_iter-{{}}'.format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, 'stage-1')
        arguments_dict = {
            'accelerator': accelerator,
            'model_name_or_path': args.model_name_or_path,
            'cache_dir': args.cache_dir,
            'do_train': True,
            'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
            'do_eval': True if args.eval_file is not None else False,
            'eval_file': data_files['eval'],
            'do_predict': True,
            'infer_file': data_files['infer'],
            'task_name': args.task_name,
            'label_list': args.label_list,
            'output_dir': current_output_dir,
            'eval_metric': args.eval_metric,
            'evaluation_strategy': args.evaluation_strategy,
            'early_stopping_patience': args.early_stopping_patience,
            'early_stopping_threshold': args.early_stopping_threshold,
            'seed': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.',
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info('***** Running self-training: iteration: %d, stage: 1 *****', iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('Self-training job completed: iteration: %d, stage: 1.', iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, 'best-checkpoint')
            current_output_dir = os.path.join(current_data_dir, 'stage-2')
            # Update arguments_dict
            arguments_dict['model_name_or_path'] = model_path
            arguments_dict['train_file'] = data_files['train']
            arguments_dict['output_dir'] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.',
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info('***** Running self-training: iteration: %d, stage: 2 *****', iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('Self-training job completed: iteration: %d, stage: 2.', iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, 'best-checkpoint'))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, 'eval_results_best-checkpoint.json')
        test_results_file = os.path.join(current_output_dir, 'test_results_best-checkpoint.json')
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, 'r') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, 'infer_output_best-checkpoint.csv')
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={'data': data_files['infer']})['data']
        infer_output = load_dataset('csv', data_files={'data': infer_output_file})['data']

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f'eval_results_iter-{iteration}.json'))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f'test_results_iter-{iteration}.json'))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files['train_pseudo'] = os.path.join(next_data_dir, f'train_pseudo.{args.data_file_extension}')

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info('Best iteration: %d', best_iteration)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f'eval_results_iter-{iteration}.json'),
                os.path.join(output_dir, 'eval_results_best-iteration.json'),
            )
    else:
        # Assume that the last iteration is the best
        logger.info('Best iteration: %d', args.max_selftrain_iterations - 1)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f'eval_results_iter-{args.max_selftrain_iterations - 1}.json'),
                os.path.join(output_dir, 'eval_results_best-iteration.json'),
            )
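# A minimal invocation sketch (file paths are hypothetical, and the enclosing
# function is assumed here to be named `selftrain`; extra keywords are consumed
# via `kwargs` as shown above):
#
# selftrain(
#     model_name_or_path='bert-base-uncased',
#     train_file='data/train.csv',
#     infer_file='data/infer.csv',
#     output_dir='output',
#     evaluation_strategy='steps',
#     eval_file='data/eval.csv',
#     eval_metric='accuracy',
#     max_selftrain_iterations=10,
# )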
| 710
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
ADAPTER_CONFIG_NAME = 'adapter_config.json'
ADAPTER_WEIGHTS_NAME = 'adapter_model.bin'
ADAPTER_SAFE_WEIGHTS_NAME = 'adapter_model.safetensors'
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF2_WEIGHTS_INDEX_NAME = 'tf_model.h5.index.json'
TF_WEIGHTS_NAME = 'model.ckpt'
FLAX_WEIGHTS_NAME = 'flax_model.msgpack'
FLAX_WEIGHTS_INDEX_NAME = 'flax_model.msgpack.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
CONFIG_NAME = 'config.json'
FEATURE_EXTRACTOR_NAME = 'preprocessor_config.json'
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = 'generation_config.json'
MODEL_CARD_NAME = 'modelcard.json'
SENTENCEPIECE_UNDERLINE = '▁'
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
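# Typical usage at the top of an example script (the version string is illustrative):
#
# check_min_version('4.21.0.dev0')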
| 220
| 0
|
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to `res`.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the input, then read it back in sorted order.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
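# Note: `insert` overwrites the node value when an equal key is seen, so duplicates
# are dropped: tree_sort([3, 1, 3]) returns [1, 3], not [1, 3, 3].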
| 335
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's `decode`.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
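# A short usage sketch (the checkpoint name is illustrative):
#
# from PIL import Image
# from transformers import CLIPSegProcessor
#
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=Image.open("cat.png"), return_tensors="pt")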
| 216
| 0
|
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compile wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()`, saving only from the main process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` (upper-cased) and removes it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for a function, class or object instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Check whether `port` (default 29500) is already in use on localhost."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
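# Example: `patch_environment` upper-cases the keys, so the block below temporarily
# sees MASTER_ADDR / MASTER_PORT in `os.environ` (values are illustrative):
#
# with patch_environment(master_addr="127.0.0.1", master_port=29501):
#     assert os.environ["MASTER_ADDR"] == "127.0.0.1"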
| 348
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using the replacement pattern registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the library __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
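# For example, with __version__ == "4.21.0.dev0", `pre_release_work()` proposes
# "4.21.0" as the release version, and `post_release_work()` then proposes
# "4.22.0.dev0" as the next development version.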
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 348
| 1
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # NOTE: this tester deliberately hardcodes its configuration values.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)

        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        desired_query_layer = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
        desired_key_layer = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
| 90
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    # Maps a linear schedule t in [0, 1] onto the "crash" noise schedule used by the original sampler.
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
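# Note: `download` shells out to wget and saves the checkpoint into the current
# working directory, e.g. download("gwf-440k") fetches ./gwf-440k.ckpt.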
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
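# `transform_conv_attns` splits a fused qkv Conv1d kernel of shape (3*C, C, 1) into
# three separate Linear weights: e.g. a (96, 32, 1) kernel becomes three (32, 32)
# matrices for query, key and value.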
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
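# Example CLI run (script and output names are illustrative):
# python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers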
| 544
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
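# e.g. ids_tensor([2, 3], vocab_size=5) -> an int32 array of shape (2, 3) with values in [0, 4]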
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
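    # Every test below draws (config, input_ids, attention_mask, max_length) from the
    # helper above, so batch size is capped at 2 and at most 5 new tokens are generated.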
@is_pt_flax_cross_test
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ , A_ , A_ , A_ : List[str] = self._get_input_ids_and_config()
A_ : int = False
A_ : int = max_length
A_ : str = 0
for model_class in self.all_generative_model_classes:
A_ : Dict = model_class(_lowerCamelCase )
A_ : Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ : str = getattr(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = pt_model_class(_lowerCamelCase ).eval()
A_ : Optional[int] = load_flax_weights_in_pytorch_model(_lowerCamelCase , flax_model.params )
A_ : str = flax_model.generate(_lowerCamelCase ).sequences
A_ : Dict = pt_model.generate(torch.tensor(_lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
A_ : Dict = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Optional[int] = self._get_input_ids_and_config()
A_ : List[Any] = False
A_ : Tuple = max_length
for model_class in self.all_generative_model_classes:
A_ : str = model_class(_lowerCamelCase )
A_ : Optional[Any] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Tuple = jit(model.generate )
A_ : List[str] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Dict = self._get_input_ids_and_config()
A_ : Any = True
A_ : Dict = max_length
for model_class in self.all_generative_model_classes:
A_ : Tuple = model_class(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : int = jit(model.generate )
A_ : Union[str, Any] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ , A_ , A_ , A_ : List[str] = self._get_input_ids_and_config()
A_ : List[str] = False
A_ : str = max_length
A_ : int = 2
for model_class in self.all_generative_model_classes:
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : List[Any] = jit(model.generate )
A_ : List[Any] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a_ ( self : str ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Optional[Any] = self._get_input_ids_and_config()
A_ : Union[str, Any] = False
A_ : int = max_length
A_ : Any = 2
A_ : Dict = 2
for model_class in self.all_generative_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : int = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Optional[Any] = self._get_input_ids_and_config()
A_ : List[Any] = True
A_ : List[Any] = max_length
A_ : Dict = 0.8
A_ : Union[str, Any] = 10
A_ : Optional[Any] = 0.3
A_ : List[str] = 1
A_ : int = 8
A_ : str = 9
for model_class in self.all_generative_model_classes:
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : int = jit(model.generate )
A_ : Optional[Any] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Optional[Any] = self._get_input_ids_and_config()
A_ : str = max_length
A_ : str = 1
A_ : Tuple = 8
A_ : Optional[Any] = 9
for model_class in self.all_generative_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : List[str] = jit(model.generate )
A_ : Union[str, Any] = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Union[str, Any] = self._get_input_ids_and_config()
A_ : Any = max_length
A_ : List[str] = 2
A_ : List[str] = 1
A_ : List[str] = 8
A_ : List[str] = 9
for model_class in self.all_generative_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : int = model.generate(_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Tuple = jit(model.generate )
A_ : Tuple = jit_generate(_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a_ ( self : int ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : Any = attention_mask.at[(0, 0)].set(0 )
A_ : str = False
A_ : int = max_length
for model_class in self.all_generative_model_classes:
A_ : int = model_class(_lowerCamelCase )
A_ : str = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Any = jit(model.generate )
A_ : Union[str, Any] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a_ ( self : int ):
"""simple docstring"""
A_ , A_ , A_ , A_ : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : Tuple = attention_mask.at[(0, 0)].set(0 )
A_ : str = True
A_ : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : Union[str, Any] = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Optional[Any] = jit(model.generate )
A_ : Optional[int] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a_ ( self : List[str] ):
"""simple docstring"""
A_ , A_ , A_ , A_ : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
A_ : Union[str, Any] = attention_mask.at[(0, 0)].set(0 )
A_ : Optional[Any] = 2
A_ : List[str] = max_length
for model_class in self.all_generative_model_classes:
A_ : str = model_class(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
A_ : Optional[int] = jit(model.generate )
A_ : str = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 361
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_lowerCamelCase : Any = False
class lowercase ( unittest.TestCase):
def a_ ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12
    @property
    def num_embeds_ada_norm(self):
        return 12
    @property
    def text_embedder_hidden_size(self):
        return 32
@property
    def dummy_vqvae(self):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
"""simple docstring"""
A_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
    def dummy_text_encoder(self):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(_lowerCamelCase )
@property
    def dummy_transformer(self):
"""simple docstring"""
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        model = Transformer2DModel(**model_kwargs)
        return model
def a_ ( self : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = '''cpu'''
A_ : Union[str, Any] = self.dummy_vqvae
A_ : str = self.dummy_text_encoder
A_ : List[Any] = self.dummy_tokenizer
A_ : int = self.dummy_transformer
A_ : Any = VQDiffusionScheduler(self.num_embed )
A_ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
A_ : Dict = VQDiffusionPipeline(
vqvae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , transformer=_lowerCamelCase , scheduler=_lowerCamelCase , learned_classifier_free_sampling_embeddings=_lowerCamelCase , )
A_ : List[Any] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : List[Any] = '''teddy bear playing in the pool'''
A_ : List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
A_ : List[Any] = pipe([prompt] , generator=_lowerCamelCase , num_inference_steps=2 , output_type='''np''' )
A_ : Any = output.images
A_ : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
A_ : List[Any] = pipe(
[prompt] , generator=_lowerCamelCase , output_type='''np''' , return_dict=_lowerCamelCase , num_inference_steps=2 )[0]
A_ : Optional[int] = image[0, -3:, -3:, -1]
A_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
A_ : Optional[int] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = '''cpu'''
A_ : int = self.dummy_vqvae
A_ : List[str] = self.dummy_text_encoder
A_ : Optional[Any] = self.dummy_tokenizer
A_ : Any = self.dummy_transformer
A_ : Any = VQDiffusionScheduler(self.num_embed )
A_ : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowerCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
A_ : int = VQDiffusionPipeline(
vqvae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , transformer=_lowerCamelCase , scheduler=_lowerCamelCase , learned_classifier_free_sampling_embeddings=_lowerCamelCase , )
A_ : List[Any] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Any = '''teddy bear playing in the pool'''
A_ : str = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
A_ : Optional[Any] = pipe([prompt] , generator=_lowerCamelCase , num_inference_steps=2 , output_type='''np''' )
A_ : Tuple = output.images
A_ : Optional[int] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
A_ : List[str] = pipe(
[prompt] , generator=_lowerCamelCase , output_type='''np''' , return_dict=_lowerCamelCase , num_inference_steps=2 )[0]
A_ : Optional[int] = image[0, -3:, -3:, -1]
A_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
A_ : str = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
def a_ ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : str ):
"""simple docstring"""
A_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
A_ : int = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
A_ : Tuple = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
A_ : Dict = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
A_ : Union[str, Any] = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=_lowerCamelCase , output_type='''np''' , )
A_ : Optional[int] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image ).max() < 2.0
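# The tests above seed a torch.Generator before each pipeline call so two
# runs produce identical images. The determinism idiom in isolation (pure
# torch, no diffusers involved):
import torch

gen = torch.Generator(device='cpu').manual_seed(0)
a = torch.randn(3, generator=gen)
gen = torch.Generator(device='cpu').manual_seed(0)
b = torch.randn(3, generator=gen)
assert torch.equal(a, b)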
| 361
| 1
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """ClapFeatureExtractor"""
a__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Forward `text` to the tokenizer and `audios` to the feature extractor, merging both outputs."""
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
def _lowercase ( self : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = self.tokenizer.model_input_names
__magic_name__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
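# `model_input_names` above merges the tokenizer's and feature extractor's
# input names, preserving order while dropping duplicates. The dict.fromkeys
# idiom in isolation (example values chosen for illustration):
tokenizer_names = ["input_ids", "attention_mask"]
feature_extractor_names = ["input_features", "attention_mask"]
merged = list(dict.fromkeys(tokenizer_names + feature_extractor_names))
assert merged == ["input_ids", "attention_mask", "input_features"]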
| 529
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__lowerCAmelCase : Dict = 'src/transformers'
__lowerCAmelCase : List[Any] = 'docs/source/en'
__lowerCAmelCase : str = '.'
def a__ ( A_, A_, A_ ):
'''simple docstring'''
with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
__magic_name__ = f.readlines()
# Find the start prompt.
__magic_name__ = 0
while not lines[start_index].startswith(A_ ):
start_index += 1
start_index += 1
__magic_name__ = start_index
while not lines[end_index].startswith(A_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__lowerCAmelCase : str = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : List[str] = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
__lowerCAmelCase : Any = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__lowerCAmelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", A_ )
return [m.group(0 ) for m in matches]
def a__ ( A_, A_ ):
'''simple docstring'''
__magic_name__ = 2 if text == """✅""" or text == """❌""" else len(A_ )
__magic_name__ = (width - text_length) // 2
__magic_name__ = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def a__ ( ):
'''simple docstring'''
__magic_name__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__magic_name__ = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__magic_name__ = {name: config.replace("""Config""", """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__magic_name__ = collections.defaultdict(A_ )
__magic_name__ = collections.defaultdict(A_ )
__magic_name__ = collections.defaultdict(A_ )
__magic_name__ = collections.defaultdict(A_ )
__magic_name__ = collections.defaultdict(A_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(A_ ):
__magic_name__ = None
if attr_name.endswith("""Tokenizer""" ):
__magic_name__ = slow_tokenizers
__magic_name__ = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
__magic_name__ = fast_tokenizers
__magic_name__ = attr_name[:-13]
elif _re_tf_models.match(A_ ) is not None:
__magic_name__ = tf_models
__magic_name__ = _re_tf_models.match(A_ ).groups()[0]
elif _re_flax_models.match(A_ ) is not None:
__magic_name__ = flax_models
__magic_name__ = _re_flax_models.match(A_ ).groups()[0]
elif _re_pt_models.match(A_ ) is not None:
__magic_name__ = pt_models
__magic_name__ = _re_pt_models.match(A_ ).groups()[0]
if lookup_dict is not None:
while len(A_ ) > 0:
if attr_name in model_name_to_prefix.values():
__magic_name__ = True
break
# Try again after removing the last word in the name
__magic_name__ = """""".join(camel_case_split(A_ )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def a__ ( A_=False ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = _find_text_in_file(
filename=os.path.join(A_, """index.md""" ), start_prompt="""<!--This table is updated automatically from the auto modules""", end_prompt="""<!-- End table-->""", )
__magic_name__ = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(A_, """index.md""" ), """w""", encoding="""utf-8""", newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
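    # The camel-case splitter defined earlier cuts model-class names such as
    # "TFBertModel" into their words via regex lookarounds. Checking that
    # regex in isolation:
    def _camel_case_split_demo(identifier):
        matches = re.finditer(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
        return [m.group(0) for m in matches]

    assert _camel_case_split_demo("TFBertModel") == ["TF", "Bert", "Model"]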
| 529
| 1
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = Dict[str, Any]
__lowercase = List[Prediction]
@add_end_docstrings(_UpperCAmelCase )
class _A ( _UpperCAmelCase ):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, 'vision')
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors='pt')
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'], boxes=inputs['boxes'], return_tensors='pt')
        inputs['target_size'] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({'target_size': target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ] ) )
            scores, classes = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs['bbox'].squeeze(0)]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation['scores'], raw_annotation['labels'], raw_annotation['boxes'])
            ]
return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
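# LayoutLM-style models work with boxes normalized to a 0-1000 grid, and
# `unnormalize` above rescales them to pixel coordinates. The arithmetic in
# isolation (dimensions chosen for illustration):
height, width = 600, 800
bbox = [100, 200, 300, 400]  # normalized to 0-1000
pixel_box = [
    width * bbox[0] / 1000,   # xmin
    height * bbox[1] / 1000,  # ymin
    width * bbox[2] / 1000,   # xmax
    height * bbox[3] / 1000,  # ymax
]
assert pixel_box == [80.0, 120.0, 240.0, 240.0]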
| 703
|
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
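    # A couple of extra illustrative checks (input values chosen here, not
    # taken from the original):
    assert indian_phone_validator("+91 9876543210")
    assert not indian_phone_validator("12345")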
| 93
| 0
|
'''simple docstring'''
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..., i.e. n * (n + 1) / 2."""
    for n in range(1 , 1000000 ):
        yield n * (n + 1) // 2
def count_divisors(n):
    """Count the divisors of n from its prime factorization: d(n) = prod(a_i + 1)."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500 )
if __name__ == "__main__":
print(solution())
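    # Quick sanity check of the divisor-counting formula used above:
    # 28 = 2^2 * 7, so d(28) = (2 + 1) * (1 + 1) = 6 (divisors 1, 2, 4, 7, 14, 28).
    assert count_divisors(28) == 6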
| 399
|
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
a__ : Union[str, Any] =logging.get_logger(__name__)
def lowercase__ ( __lowercase : bool , __lowercase : bool ) -> Any:
"""simple docstring"""
def run_func(__lowercase : Tuple ):
@wraps(__lowercase )
def run_in_eager_mode(*__lowercase : Any , **__lowercase : Any ):
return func(*__lowercase , **__lowercase )
@wraps(__lowercase )
@tf.function(experimental_compile=__lowercase )
def run_in_graph_mode(*__lowercase : List[str] , **__lowercase : Optional[int] ):
return func(*__lowercase , **__lowercase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : int ) -> ["tf.Tensor"]:
"""simple docstring"""
__UpperCamelCase = random.Random()
__UpperCamelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(__lowercase , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : TensorFlowBenchmarkArguments
SCREAMING_SNAKE_CASE_ : PretrainedConfig
SCREAMING_SNAKE_CASE_ : str ="TensorFlow"
@property
def _lowerCamelCase ( self : List[str] ):
return tf.__version__
def _lowerCamelCase ( self : Union[str, Any] , __A : str , __A : int , __A : int ):
# initialize GPU on separate process
__UpperCamelCase = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__UpperCamelCase = self._prepare_inference_func(__A , __A , __A )
return self._measure_speed(_inference )
def _lowerCamelCase ( self : Dict , __A : str , __A : int , __A : int ):
__UpperCamelCase = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__UpperCamelCase = self._prepare_train_func(__A , __A , __A )
return self._measure_speed(_train )
def _lowerCamelCase ( self : str , __A : str , __A : int , __A : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __A )
__UpperCamelCase = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__UpperCamelCase = self._prepare_inference_func(__A , __A , __A )
return self._measure_memory(_inference )
def _lowerCamelCase ( self : Dict , __A : str , __A : int , __A : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __A )
__UpperCamelCase = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__UpperCamelCase = self._prepare_train_func(__A , __A , __A )
return self._measure_memory(_train )
def _lowerCamelCase ( self : Union[str, Any] , __A : str , __A : int , __A : int ):
__UpperCamelCase = self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError('Mixed precision is currently not supported.' )
__UpperCamelCase = (
hasattr(__A , 'architectures' )
and isinstance(config.architectures , __A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__UpperCamelCase = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
__UpperCamelCase = __import__('transformers' , fromlist=[model_class] )
__UpperCamelCase = getattr(__A , __A )
__UpperCamelCase = model_cls(__A )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
__UpperCamelCase = TF_MODEL_MAPPING[config.__class__](__A )
# encoder-decoder has vocab size saved differently
__UpperCamelCase = config.vocab_size if hasattr(__A , 'vocab_size' ) else config.encoder.vocab_size
__UpperCamelCase = random_input_ids(__A , __A , __A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__A , decoder_input_ids=__A , training=__A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__A , training=__A )
__UpperCamelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _lowerCamelCase ( self : Any , __A : str , __A : int , __A : int ):
__UpperCamelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
        if self.args.fp16:
raise NotImplementedError('Mixed precision is currently not supported.' )
__UpperCamelCase = (
hasattr(__A , 'architectures' )
and isinstance(config.architectures , __A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__UpperCamelCase = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
__UpperCamelCase = __import__('transformers' , fromlist=[model_class] )
__UpperCamelCase = getattr(__A , __A )
__UpperCamelCase = model_cls(__A )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
__UpperCamelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__A )
# encoder-decoder has vocab size saved differently
__UpperCamelCase = config.vocab_size if hasattr(__A , 'vocab_size' ) else config.encoder.vocab_size
__UpperCamelCase = random_input_ids(__A , __A , __A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__UpperCamelCase = model(__A , decoder_input_ids=__A , labels=__A , training=__A )[0]
__UpperCamelCase = tf.gradients(__A , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__UpperCamelCase = model(__A , labels=__A , training=__A )[0]
__UpperCamelCase = tf.gradients(__A , model.trainable_variables )
return gradients
__UpperCamelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _lowerCamelCase ( self : List[Any] , __A : List[Any] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(__A , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__UpperCamelCase = timeit.repeat(
__A , repeat=self.args.repeat , number=1_0 , )
return min(__A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def _lowerCamelCase ( self : Optional[int] , __A : Callable[[], None] ):
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
__UpperCamelCase = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
                    if not is_py3nvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
__UpperCamelCase = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
__UpperCamelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__UpperCamelCase = nvml.nvmlDeviceGetMemoryInfo(__A )
__UpperCamelCase = meminfo.used
__UpperCamelCase = Memory(__A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
__UpperCamelCase = None
else:
__UpperCamelCase = measure_peak_memory_cpu(__A )
__UpperCamelCase = Memory(__A ) if isinstance(__A , __A ) else memory_bytes
if self.args.trace_memory_line_by_line:
__UpperCamelCase = stop_memory_tracing(__A )
if memory is None:
__UpperCamelCase = summary.total
else:
__UpperCamelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 399
| 1
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 , SCREAMING_SNAKE_CASE__ : int=1_3 , SCREAMING_SNAKE_CASE__ : List[str]=7 , SCREAMING_SNAKE_CASE__ : List[str]=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : str=3_2 , SCREAMING_SNAKE_CASE__ : Any=5 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : List[str]=3_7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.002 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> List[str]:
a_ : Tuple = parent
a_ : Tuple = batch_size
a_ : Optional[int] = encoder_seq_length
a_ : str = decoder_seq_length
# For common tests
a_ : Optional[Any] = self.decoder_seq_length
a_ : Union[str, Any] = is_training
a_ : Union[str, Any] = use_attention_mask
a_ : Tuple = use_labels
a_ : List[Any] = vocab_size
a_ : List[Any] = hidden_size
a_ : str = num_hidden_layers
a_ : Tuple = num_attention_heads
a_ : List[str] = d_ff
a_ : Dict = relative_attention_num_buckets
a_ : Dict = dropout_rate
a_ : Optional[int] = initializer_factor
a_ : Dict = eos_token_id
a_ : str = pad_token_id
a_ : int = decoder_start_token_id
a_ : Optional[Any] = None
a_ : int = decoder_layers
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return TaConfig.from_pretrained('google/umt5-base' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , ) -> Optional[int]:
if attention_mask is None:
a_ : int = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a_ : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a_ : Dict = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
if decoder_head_mask is None:
a_ : Optional[int] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
if cross_attn_head_mask is None:
a_ : str = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
a_ : List[str] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
a_ : str = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
a_ : Dict = input_ids.clamp(self.pad_token_id + 1 )
a_ : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
a_ : Dict = self.get_config()
a_ : List[str] = config.num_attention_heads
a_ : Dict = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, input_dict
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ , a_ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , ) -> str:
a_ : List[Any] = UMTaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : int = model(
input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , )
a_ : List[str] = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ )
a_ : Any = result.last_hidden_state
a_ : Dict = result.past_key_values
a_ : int = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int:
a_ : List[Any] = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval()
# first forward pass
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
a_ : Dict = model(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 )
a_ , a_ : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a_ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
a_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
a_ : Dict = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['last_hidden_state']
# select random slice
a_ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a_ : str = output_from_no_past[:, -1, random_slice_idx].detach()
a_ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[Any]:
a_ : int = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval()
a_ : Dict = model(**SCREAMING_SNAKE_CASE__ )['last_hidden_state']
self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() )
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
snake_case__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
snake_case__ : Optional[Any] = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case__ : str = True
snake_case__ : Dict = False
snake_case__ : Any = False
snake_case__ : Optional[Any] = True
snake_case__ : List[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case__ : Optional[Any] = [0.8, 0.9]
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
a_ : Optional[int] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
a_ : Union[str, Any] = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=SCREAMING_SNAKE_CASE__ , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
a_ : Union[str, Any] = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
a_ : List[str] = self.model_tester.prepare_config_and_inputs()
a_ : Optional[int] = config_and_inputs[0]
a_ : Optional[Any] = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval()
model.to(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
}
for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ):
a_ : List[str] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
a_ : Dict = torch.ones(
config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ )
a_ : Any = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
a_ : List[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : Tuple = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
a_ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE__ ).input_ids
# fmt: off
a_ : str = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) )
a_ : Union[str, Any] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
a_ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
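# The use_cache test above verifies that decoding one extra token with
# past_key_values matches the last position of a full forward pass. The core
# comparison, sketched with stand-in tensors (shapes chosen for illustration):
import torch

output_from_no_past = torch.randn(2, 6, 8)          # full-sequence hidden states
output_from_past = output_from_no_past[:, -1:, :]   # what the cached step should reproduce
assert torch.allclose(output_from_no_past[:, -1, 3], output_from_past[:, 0, 3], atol=1e-3)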
| 443
|
UpperCAmelCase_ : Optional[int] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase_ : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase_ : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 443
| 1
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( a__ , a__ , a__ , a__ , a__):
'''simple docstring'''
a_ : List[Any] = TapasConfig.from_json_file(a__)
# set absolute/relative position embeddings parameter
a_ : Dict = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
a_ : str = TapasForQuestionAnswering(config=a__)
elif task == "WTQ":
# run_task_main.py hparams
a_ : Optional[Any] = 4
a_ : Union[str, Any] = True
# hparam_utils.py hparams
a_ : str = 0.66_4694
a_ : List[Any] = 0.20_7951
a_ : Optional[int] = 0.12_1194
a_ : List[Any] = True
a_ : Tuple = True
a_ : Any = False
a_ : List[str] = 0.035_2513
a_ : Optional[Any] = TapasForQuestionAnswering(config=a__)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
a_ : Dict = 4
a_ : Dict = False
# hparam_utils.py hparams
a_ : Any = 36.4519
a_ : List[Any] = 0.90_3421
a_ : Union[str, Any] = 222.088
a_ : List[Any] = True
a_ : Optional[int] = True
a_ : str = True
a_ : Optional[int] = 0.76_3141
a_ : Any = TapasForQuestionAnswering(config=a__)
elif task == "TABFACT":
a_ : List[Any] = TapasForSequenceClassification(config=a__)
elif task == "MLM":
a_ : Tuple = TapasForMaskedLM(config=a__)
elif task == "INTERMEDIATE_PRETRAINING":
a_ : Any = TapasModel(config=a__)
else:
raise ValueError(f'''Task {task} not supported.''')
print(f'''Building PyTorch model from configuration: {config}''')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(a__ , a__ , a__)
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''')
model.save_pretrained(a__)
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''')
a_ : Tuple = TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + """vocab.txt""" , model_max_length=5_1_2)
tokenizer.save_pretrained(a__)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
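    # Example invocation (all paths below are placeholders, not real files):
    #   python <this conversion script> \
    #       --task WTQ \
    #       --tf_checkpoint_path /path/to/model.ckpt \
    #       --tapas_config_file /path/to/tapas_config.json \
    #       --pytorch_dump_path /path/to/output_dir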
| 540
|
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Distribute values into unit-width buckets, then sort each bucket."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
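    # Works for floats too, since each value lands in bucket int(v - min_value):
    print(bucket_sort([0.5, -1.25, 3, 3, 2]))  # [-1.25, 0.5, 2, 3, 3]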
| 540
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = "gpt_neox_japanese"
def __init__(self , _UpperCAmelCase=3_2_0_0_0 , _UpperCAmelCase=2_5_6_0 , _UpperCAmelCase=3_2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1.00 , _UpperCAmelCase=1_0_0_0_0 , _UpperCAmelCase=2_0_4_8 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , _UpperCAmelCase=True , _UpperCAmelCase=3_1_9_9_6 , _UpperCAmelCase=3_1_9_9_9 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , **_UpperCAmelCase , ) -> str:
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = vocab_size
__UpperCamelCase : List[str] = max_position_embeddings
__UpperCamelCase : Dict = hidden_size
__UpperCamelCase : str = num_hidden_layers
__UpperCamelCase : List[str] = num_attention_heads
__UpperCamelCase : Any = intermediate_multiple_size
__UpperCamelCase : List[str] = hidden_act
__UpperCamelCase : List[str] = rotary_pct
__UpperCamelCase : List[str] = rotary_emb_base
__UpperCamelCase : Any = initializer_range
__UpperCamelCase : Optional[int] = layer_norm_eps
__UpperCamelCase : Union[str, Any] = use_cache
__UpperCamelCase : Dict = attention_dropout
__UpperCamelCase : List[Any] = hidden_dropout
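# The config above follows the standard PretrainedConfig pattern: constructor
# keywords become attributes and serialize for free. A minimal usage sketch,
# assuming the published class name is GPTNeoXJapaneseConfig:
from transformers import GPTNeoXJapaneseConfig

cfg = GPTNeoXJapaneseConfig(num_hidden_layers=4)
assert cfg.model_type == "gpt_neox_japanese"
print(cfg.to_json_string()[:60])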
| 399
|
'''simple docstring'''
def min_path_sum(grid):
    """Return the minimum cost of a right/down path from top-left to bottom-right."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row , row_above):
    """Accumulate minimal costs into current_row, given the completed row above."""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
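    # Worked example: the cheapest monotone path through this grid is
    # 1 -> 3 -> 1 -> 1 -> 1, costing 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7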
| 399
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : List[str] = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
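# _LazyModule above defers the heavy torch-backed imports until an attribute
# is first accessed. A minimal standalone sketch of the same idea (this is an
# illustration, not the transformers implementation):
import importlib

class LazyModuleDemo:
    def __init__(self, name):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        # Import the real module on first attribute access, then delegate.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json_lazy = LazyModuleDemo("json")
assert json_lazy.loads("[1, 2]") == [1, 2]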
| 601
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase__ :
def __init__( self , lowercase , ) -> List[str]:
__UpperCamelCase = parent
__UpperCamelCase = 1_3
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 9_9
__UpperCamelCase = 3_2
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 3_7
__UpperCamelCase = """gelu"""
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 5_1_2
__UpperCamelCase = 1_6
__UpperCamelCase = 2
__UpperCamelCase = 0.02
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = None
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = TFDistilBertModel(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
__UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
__UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase )
__UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFDistilBertForSequenceClassification(lowercase )
__UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__UpperCamelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image Classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
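

if __name__ == "__main__":
    # Illustrative usage sketch (an addition for demonstration, not part of the module;
    # running this file directly will not resolve the relative imports above, so this
    # shows intended use from client code). `from_config` resolves a config class to
    # its Flax architecture through FLAX_MODEL_MAPPING and builds it with randomly
    # initialized weights, so no checkpoint download is needed. Requires `flax`.
    from transformers import BertConfig

    tiny_config = BertConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=2, intermediate_size=128)
    model = FlaxAutoModel.from_config(tiny_config)
    print(type(model).__name__)  # FlaxBertModel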
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop exists when the exact same Node appears more than once while iterating."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
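
    # Note: `has_loop` above trades O(n) extra memory (the `visited` list) for
    # simplicity. The property below is an illustrative addition (not part of the
    # original module): Floyd's tortoise-and-hare cycle detection, which uses
    # constant memory by advancing two pointers at different speeds.
    @property
    def has_loop_floyd(self) -> bool:
        slow = fast = self
        while fast is not None and fast.next_node is not None:
            slow = slow.next_node            # moves one node per step
            fast = fast.next_node.next_node  # moves two nodes per step
            if slow is fast:                 # the pointers can only meet inside a cycle
                return True
        return False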
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
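

# Illustrative aside (an addition, not part of the transformers API): a simplified
# stand-in showing the mechanics behind `_LazyModule` used above. Exported names are
# resolved to their defining submodule on first attribute access and then cached, so
# importing the package stays cheap until a heavy backend is actually needed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [exported names]} into {exported name: submodule}
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only called when normal attribute lookup fails
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value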
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
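

# Worked example of the validation-performance filter above (illustrative numbers):
# with eval_result = 0.8 and 1000 pseudo-labeled rows, the 800 highest-probability
# rows are kept — the better the current model scores on validation, the more of its
# pseudo labels are trusted for the next iteration.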
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the self-training loop: alternate fine-tuning and pseudo-labeling."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        # Overwrite the default values with any provided keyword arguments.
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        data_dir = data_dir_format(iteration)
        assert os.path.exists(data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
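

# Example programmatic invocation (illustrative; the module name, checkpoint, and
# file names below are placeholders — any csv/json files with matching extensions work):
#
#   from selftraining import selftrain
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="train.csv",
#       infer_file="unlabeled.csv",
#       eval_file="eval.csv",
#       output_dir="self_train_output",
#       evaluation_strategy="epoch",
#       max_selftrain_iterations=3,
#   )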
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    """
    A 1D UNet that takes a noisy sample and a timestep and returns a sample-shaped output.
    The `@register_to_config` decorator stores the `__init__` arguments on `self.config`.
    """

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
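

# Usage sketch (illustrative, not from the library docs): with the default
# dance-diffusion style configuration, a noisy 1D sample of shape
# (batch, in_channels, sample_size) round-trips through the UNet with its shape preserved.
#
#   import torch
#   model = UNet1DModel()
#   noisy = torch.randn(1, 2, model.config.sample_size)
#   denoised = model(noisy, timestep=10).sample  # shape (1, 2, sample_size)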
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # the fused qkv projection is split into separate query/key/value weights
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
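

# Worked illustration of the qkv split above (hypothetical dim=2): a fused tensor of
# shape (3*dim, dim) yields query = rows [0, dim), key = rows [dim, 2*dim), and
# value = the last dim rows, matching the slices val[:dim], val[dim : dim * 2], val[-dim:].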
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into the 🤗 transformers structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
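
# Example invocation (illustrative; the script file name and the dump path are
# placeholders, while the flags match the parser defined above):
#
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted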
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
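# Illustrative example (not part of the original module): a 2D PyTorch linear weight keyed
# ("dense", "weight") is renamed to the Flax convention ("dense", "kernel") and transposed,
# since Flax stores linear kernels as (in_features, out_features):
#
#     key, tensor = rename_key_and_reshape_tensor(
#         ("dense", "weight"), np.ones((4, 8)), {("dense", "kernel"): np.ones((8, 4))}, "model"
#     )
#     # key == ("dense", "kernel"); tensor.shape == (8, 4)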
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
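# Example of the prefix handling above: loading a head-model checkpoint (PyTorch keys like
# "bert.embeddings.word_embeddings.weight") into a bare base model strips the leading "bert"
# component, while loading a base-model checkpoint into a model with a head prepends it, so
# the flattened keys line up with `random_flax_state_dict` in both directions.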
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
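# Usage sketch (hypothetical checkpoint name; assumes both PyTorch and Flax are installed):
#
#     from transformers import BertModel, FlaxBertModel
#
#     pt_model = BertModel.from_pretrained("bert-base-uncased")
#     flax_model = FlaxBertModel.from_pretrained("bert-base-uncased")
#     flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
#     # `flax_params` is a nested dict with the same structure as `flax_model.params`.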
| 182
| 0
|
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer
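# Worked example: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") greedily
# takes 500, then 100 four times, then 50, 20, 10, 5 and 2, returning
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. The greedy strategy is only guaranteed to be
# optimal for canonical coin systems such as this one.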
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 703
|
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)
    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
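    # Example (illustrative numbers only): with dataset_size = 10_000, train_batch_size = 32,
    # accumulate_grad_batches = 2 and a single GPU, the effective batch size is 64, so with
    # max_epochs = 3 total_steps() returns 10_000 / 64 * 3 = 468.75 steps for the scheduler.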
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
| 615
| 0
|
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 103
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
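    # Illustrative run (hypothetical vocab): with vocab {"foo": 0, "foobar": 1, "bar": 2},
    # tokenize("foobarbar") matches the longest prefix first and returns ["foobar", "bar"];
    # each character that starts no known substring becomes one unk token instead.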
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba, then run each segment through the wordpiece tokenizer."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 103
| 1
|
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Finds root of `function` from `starting_point` by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
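# The loop above implements the multiplicity-aware Newton-Raphson update
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# e.g. for f(x) = x**2 - 4 with x_0 = 3: x_1 = 3 - 5/6 = 2.1666..., converging towards 2.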
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 555
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
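    # Sketch of `truncate` (hypothetical completion text): with
    # truncate_before_pattern=["\n\n\n"], decoding "def f():\n    return 1\n\n\n# scratch"
    # cuts at the first triple newline and keeps only "def f():\n    return 1".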
| 555
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
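# Sanity check on the output-size arithmetic in `create_and_check_model` above: for stage 0
# with image_size=64, patch size 7, stride 4 and padding 2, the spatial size becomes
# floor((64 + 2*2 - 7) / 4 + 1) = floor(16.25) = 16, i.e. a 16x16 feature map.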
@require_tf
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCAmelCase_ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = TFCvtModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[Any] = TFCvtConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
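

# A minimal usage sketch for the two helpers above (the numbers follow from the default
# arguments; they are illustrative, not taken from the original script):
#
#   mask_generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#   mask = mask_generator()
#   mask.shape  # torch.Size([2304]) -> one entry per 4x4 model patch of a 192x192 image
#   mask.sum()  # 1408 == ceil(36 * 0.6) * 8 * 8 -> 22 of the 36 mask patches, each spanning 8x8 model patches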
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1 (Project Euler problem 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
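

# A short sketch of how this config is typically used (assumed usage; the matching
# `Swin2SRModel` class is defined elsewhere, not in this file):
#
#   config = Swin2SRConfig(upscale=4)
#   config.num_layers   # 6, derived from len(depths) in __init__
#   config.hidden_size  # 180, resolved through attribute_map -> embed_dim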
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local",
    )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
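

# Illustration of the shard-name convention the regex above expects (hypothetical paths):
#
#   count_samples(["gs://bucket/wikitext-00001-02048.tfrecord",
#                  "gs://bucket/wikitext-00002-02048.tfrecord"])  # -> 4096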
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid constants, in metres
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points given in degrees."""
    # Convert geodetic latitudes to reduced (parametric) latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
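
# A quick usage sketch (coordinates are illustrative; the result is approximate):
#
#   haversine_distance(37.7749, -122.4194, 40.7128, -74.0060) / 1000
#   # great-circle distance from San Francisco to New York City, roughly 4.1e3 km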
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__SCREAMING_SNAKE_CASE = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")
    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip a remote filesystem prefix (e.g. `s3://`) from `dataset_path`."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Check whether `fs` is a remote filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename the file `src` in `fs` to `dst`."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear reference to the loop and thread. This is necessary otherwise
    HTTPFileSystem hangs in the ML training loop.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
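

# Example behaviour of the helpers above (hypothetical paths):
#
#   extract_path_from_uri("s3://my-bucket/datasets/squad")  # -> "my-bucket/datasets/squad"
#   extract_path_from_uri("/local/datasets/squad")          # -> unchanged, no "://" present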
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _snake_case :
snake_case__ = None
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = None
snake_case__ = None
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = True
snake_case__ = None
snake_case__ = 1
snake_case__ = None
snake_case__ = False
snake_case__ = None
snake_case__ = None
def lowerCamelCase__ ( self : Any ):
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
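# Hedged sketch of the `copy` pattern above on a tiny illustrative dataclass:
# deep-copying each field means mutable attributes are not shared between copies.
from dataclasses import field

@dataclass
class DemoConfig:
    num_proc: int = 1
    storage_options: dict = field(default_factory=dict)

    def copy(self) -> "DemoConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

demo = DemoConfig()
clone = demo.copy()
clone.storage_options["token"] = "x"
assert demo.storage_options == {}  # deep copy: the dict is not shared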
| 366
| 1
|
def _lowercase( arr: list[int] ):
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # dp[i][j]: some subset of the first i items sums to exactly j
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
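# Worked example for the routine above: for [1, 6, 11, 5] (total 23) the best split
# is {1, 5, 6} vs {11}, i.e. sums 12 and 11, so the minimum difference is 1.
assert _lowercase([1, 6, 11, 5]) == 1
assert _lowercase([3, 1, 4, 2, 2, 1]) == 1  # e.g. {3, 2, 1} vs {4, 2, 1}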
| 20
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __snake_case ( ImageProcessingSavingTestMixin ,unittest.TestCase):
"""simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Optional[Any] ) -> Dict:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , """do_resize""" ) )
        self.assertTrue(hasattr(image_processor , """size""" ) )
        self.assertTrue(hasattr(image_processor , """apply_ocr""" ) )
def __lowercase ( self : List[str] ) -> int:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __lowercase ( self : str ) -> Dict:
pass
def __lowercase ( self : str ) -> Union[str, Any]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="""pt""" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : Any ) -> Any:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : List[str] ) -> int:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __lowercase ( self : int ) -> Optional[Any]:
# with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
from datasets import load_dataset
        ds = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
        image = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
        encoding = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase_ : Union[str, Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
lowerCAmelCase_ : Any = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCamelCase )
self.assertListEqual(encoding.boxes , lowerCamelCase )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
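# Hedged usage sketch of the processor exercised above (requires Pillow, transformers
# and, for OCR, pytesseract; "document.png" is an illustrative path):
#
#     from PIL import Image
#     image = Image.open("document.png").convert("RGB")
#     processor = LayoutLMvaImageProcessor()               # apply_ocr=True by default
#     encoding = processor(image, return_tensors="pt")
#     encoding.pixel_values.shape                          # (1, 3, 224, 224)
#     encoding.words, encoding.boxes                       # Tesseract words + boxes
#     processor = LayoutLMvaImageProcessor(apply_ocr=False)
#     processor(image, return_tensors="pt").pixel_values   # pixels only, no OCR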
| 275
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
TPosition = tuple[int, int]
class Node :
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic( self ):
        """simple docstring"""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__( self , other ):
"""simple docstring"""
return self.f_cost < other.f_cost
class AStar :
    def __init__( self , start , goal ):
        """simple docstring"""
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self ):
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors( self , parent ):
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node ):
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar :
    def __init__( self , start , goal ):
        """simple docstring"""
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self ):
        """simple docstring"""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self , fwd_node , bwd_node ):
        """simple docstring"""
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')
    bd_start_time = time.time()
    bd_a_star = BidirectionalAStar(init, goal)
    bd_path = bd_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
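# Side note on the HEURISTIC switch above: for a displacement of (dx, dy) = (3, 4)
# the two measures differ noticeably, which changes how greedily the search expands.
dx, dy = 3, 4
assert abs(dx) + abs(dy) == 7       # Manhattan (HEURISTIC == 1)
assert sqrt(dx**2 + dy**2) == 5.0   # Euclidean (HEURISTIC == 0)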
| 707
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend( lowerCamelCase__ : str ):
if _re_test_backend.search(lowerCamelCase__ ) is None:
return None
    backends = [b[0] for b in _re_backend.findall(lowerCamelCase__ )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( lowerCamelCase__ ):
    with open(lowerCamelCase__, "r", encoding="utf-8", newline="\n" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R"\[([^\]]+)\]", content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {"none": objects}
# Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects, type_hint_objects ):
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def check_all_inits( ):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def get_transformers_submodules( ):
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep, "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py", "" ).replace(os.path.sep, "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules( ):
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
    import_structure_keys = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py" ), "r" ) as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]", init_content ) ) )
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(f'''- {module}''' for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f'''{list_of_modules}\n'''
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
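# Illustrative self-check of `find_backend` above (sample lines are made up): a
# guarded "if not is_xxx_available():" line collapses to its backend name, sorted
# and joined with "_and_" when several backends guard the same block.
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("import os") is None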
| 295
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
a : List[str] = logging.get_logger(__name__)
class BeitFeatureExtractor( BeitImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
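# The same deprecation-shim pattern in miniature (all names here are illustrative):
# the old class stays importable but warns when constructed and otherwise behaves
# exactly like its replacement.
class _NewProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs

class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldFeatureExtractor is deprecated; use _NewProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)

_OldFeatureExtractor()  # emits a FutureWarning, then behaves like _NewProcessor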
| 640
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
    def test_dpr_question_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
    def test_dpr_reader_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference_no_head( self ):
        model = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
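# Hedged usage sketch of the checkpoint exercised in the integration test above
# (downloads weights, so network access is assumed):
#
#     from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder
#     tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
#         "facebook/dpr-question_encoder-single-nq-base")
#     model = TFDPRQuestionEncoder.from_pretrained(
#         "facebook/dpr-question_encoder-single-nq-base")
#     inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
#     embedding = model(**inputs).pooler_output  # shape (1, 768)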
| 640
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_table_transformer'] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
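# What the lazy pattern above buys: submodules are imported on first attribute
# access rather than at package import time. A rough module-level equivalent using
# PEP 562's __getattr__ (the mapping below is illustrative):
#
#     import importlib
#     _LAZY = {'TableTransformerModel': '.modeling_table_transformer'}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __package__), name)
#         raise AttributeError(name)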
| 701
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __A ( TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp( self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
    def get_input_output_texts( self , tokenizer):
        """simple docstring"""
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
    def get_clean_sequence( self , tokenizer):
        """simple docstring"""
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text , add_special_tokens=False)
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False)
        return text, ids
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
    def test_full_tokenizer( self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''')
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''')
self.assertIsNotNone(a__)
_lowerCamelCase : List[str] = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : int = tokenizer.tokenize(a__)
self.assertListEqual(a__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(a__ , '''wb''') as handle:
pickle.dump(a__ , a__)
with open(a__ , '''rb''') as handle:
_lowerCamelCase : List[str] = pickle.load(a__)
_lowerCamelCase : Dict = tokenizer_new.tokenize(a__)
self.assertListEqual(a__ , a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = MecabTokenizer(mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
try:
_lowerCamelCase : str = MecabTokenizer(mecab_dic='''unidic_lite''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
try:
_lowerCamelCase : int = MecabTokenizer(mecab_dic='''unidic''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = MecabTokenizer(do_lower_case=a__ , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
try:
_lowerCamelCase : Any = MecabTokenizer(
do_lower_case=a__ , normalize_text=a__ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''')
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MecabTokenizer(normalize_text=a__ , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''')
self.assertIsNotNone(a__)
_lowerCamelCase : Tuple = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : Tuple = tokenizer.tokenize(a__)
self.assertListEqual(a__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(a__ , '''wb''') as handle:
pickle.dump(a__ , a__)
with open(a__ , '''rb''') as handle:
_lowerCamelCase : str = pickle.load(a__)
_lowerCamelCase : Any = tokenizer_new.tokenize(a__)
self.assertListEqual(a__ , a__)
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国''', '''人''', '''参政''', '''権'''])
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人''', '''参政権'''])
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人参政権'''])
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Tuple = SudachiTokenizer(do_lower_case=a__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SudachiTokenizer(normalize_text=a__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = SudachiTokenizer(trim_whitespace=a__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''')
self.assertIsNotNone(a__)
_lowerCamelCase : List[str] = '''こんにちは、世界。\nこんばんは、世界。'''
_lowerCamelCase : List[str] = tokenizer.tokenize(a__)
self.assertListEqual(a__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(a__ , '''wb''') as handle:
pickle.dump(a__ , a__)
with open(a__ , '''rb''') as handle:
_lowerCamelCase : Optional[Any] = pickle.load(a__)
_lowerCamelCase : Dict = tokenizer_new.tokenize(a__)
self.assertListEqual(a__ , a__)
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = JumanppTokenizer(do_lower_case=a__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = JumanppTokenizer(normalize_text=a__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = JumanppTokenizer(trim_whitespace=a__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''') , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
_lowerCamelCase : List[str] = {}
for i, token in enumerate(a__):
_lowerCamelCase : Optional[Any] = i
_lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=a__ , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こんにちは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは''') , ['''こん''', '''##ばんは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''') , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Tuple = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''')
_lowerCamelCase : str = tokenizer.subword_tokenizer
_lowerCamelCase : Any = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''')
self.assertListEqual(a__ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''])
_lowerCamelCase : str = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''')
self.assertListEqual(a__ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''])
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''')
_lowerCamelCase : int = tokenizer.encode('''ありがとう。''' , add_special_tokens=a__)
_lowerCamelCase : Union[str, Any] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=a__)
_lowerCamelCase : int = tokenizer.build_inputs_with_special_tokens(a__)
_lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(a__ , a__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp( self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def __snake_case ( self , **a__):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **a__)
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Any = '''こんにちは、世界。 \nこんばんは、世界。'''
_lowerCamelCase : int = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
pass # TODO add if relevant
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''')
_lowerCamelCase : Optional[int] = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''')
self.assertListEqual(
a__ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 613
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 29
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=UpperCamelCase__ , )
self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=UpperCamelCase__ , )
self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
| 529
| 0
|
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):  # only odd candidates need sieving
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below `ceiling` writable as the longest sum of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
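# Added sanity check (illustrative, not part of the original solution): small inputs
# make both helpers easy to verify by hand.
#
#   >>> prime_sieve(30)
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
#   >>> solution(100)  # 2 + 3 + 5 + 7 + 11 + 13 = 41, the longest such sum below 100
#   41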
| 705
|
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
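# For example (illustrative key, derived from WHISPER_MAPPING above): the OpenAI key
# "decoder.blocks.0.mlp.0.weight" is rewritten to the Hugging Face key
# "decoder.layers.0.fc1.weight".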
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
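# Sketch of the intent (inferred from the usage below, not original documentation):
# the returned linear layer shares its weight tensor with the embedding, so the output
# projection computes hidden_states @ embed_tokens.weight.T without a second matrix.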
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download directory is an assumption; pass `root` explicitly to override it.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # a bare model name (e.g. "tiny") was given: fetch the checkpoint first.
        # _download returns raw bytes, so wrap them for torch.load.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCamelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
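# Example invocation (script name and paths are illustrative, not from the original):
#
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf
#
# A bare model name such as "tiny" is resolved through _MODELS and downloaded first;
# a local .pt path is loaded directly.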
| 86
| 0
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
                 depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128],
                 downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
                 use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SegformerConfig(
image_size=self.image_size ,num_channels=self.num_channels ,num_encoder_blocks=self.num_encoder_blocks ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 105
|
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of input_list using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
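# Expected behavior (illustrative inputs, derived from the definitions above):
#
#   >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
#   [1, 2, 5, 7, 7, 8, 9]
#   >>> iter_merge_sort([])
#   []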
| 275
| 0
|
from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in meters between two points, with an ellipsoidal latitude correction."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
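# Illustrative checks (coordinates assumed): the distance from a point to itself is
# zero, and the formula is symmetric in its two endpoints.
#
#   >>> haversine_distance(48.8566, 2.3522, 48.8566, 2.3522)
#   0.0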
| 334
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no 1-adjacent vertex already uses `color`."""
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursive backtracking step: try to color vertex `index` and all later vertices."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
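# Minimal usage sketch (adjacency-matrix input assumed): a triangle is 3-colorable
# but not 2-colorable.
#
#   >>> color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 3)
#   [0, 1, 2]
#   >>> color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 2)
#   []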
| 334
| 1
|
class SubArray:
    def __init__(self, arr):
        # we need to convert the string input into a list of numbers
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
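# Worked example (input string assumed): for "1,-2,3,4,-1", sum_value holds the best
# sum ending at each index ([1, -1, 3, 7, 6]) and rear the best seen so far, giving 7.
#
#   >>> SubArray("1,-2,3,4,-1").solve_sub_array()
#   7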
| 365
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = VOCAB_FILES_NAMES
__lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Tuple = ['''input_ids''', '''attention_mask''']
__lowercase : str = BlenderbotTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , __lowercase=True , **__lowercase , ):
"""simple docstring"""
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase , **__lowercase , )
__A : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __lowercase ) != add_prefix_space:
__A : List[Any] = getattr(__lowercase , pre_tok_state.pop('type' ) )
__A : Any = add_prefix_space
__A : Any = pre_tok_class(**__lowercase )
__A : Union[str, Any] = add_prefix_space
__A : List[Any] = 'post_processor'
__A : Optional[Any] = getattr(self.backend_tokenizer , __lowercase , __lowercase )
if tokenizer_component_instance:
__A : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__A : Union[str, Any] = tuple(state['sep'] )
if "cls" in state:
__A : int = tuple(state['cls'] )
__A : Any = False
if state.get('add_prefix_space' , __lowercase ) != add_prefix_space:
__A : Any = add_prefix_space
__A : int = True
if state.get('trim_offsets' , __lowercase ) != trim_offsets:
__A : List[Any] = trim_offsets
__A : List[str] = True
if changes_to_apply:
__A : Optional[int] = getattr(__lowercase , state.pop('type' ) )
__A : Optional[int] = component_class(**__lowercase )
setattr(self.backend_tokenizer , __lowercase , __lowercase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case__ ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A : Union[str, Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else value
__A : List[str] = value
def snake_case__ ( self , *__lowercase , **__lowercase ):
"""simple docstring"""
__A : Tuple = kwargs.get('is_split_into_words' , __lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowercase , **__lowercase )
def snake_case__ ( self , *__lowercase , **__lowercase ):
"""simple docstring"""
__A : Any = kwargs.get('is_split_into_words' , __lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowercase , **__lowercase )
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
__A : str = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
__A : str = [self.sep_token_id]
__A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(__lowercase )
__A : Tuple = ' '.join(__lowercase )
__A : List[Any] = self.encode(__lowercase )
if len(__lowercase ) > self.model_max_length:
__A : Any = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
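# Usage sketch (hedged: the class above corresponds to transformers'
# BlenderbotTokenizerFast, and "facebook/blenderbot-3B" is the checkpoint the
# vocab maps above point at):
# from transformers import BlenderbotTokenizerFast
# tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
# ids = tokenizer("Hello, how are you?").input_ids
# build_inputs_with_special_tokens only appends eos_token_id, so `ids` ends
# with the EOS id rather than being wrapped in BOS/EOS.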
| 365
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__UpperCAmelCase :Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Any , snake_case : Dict , snake_case : List[Any] ) -> Dict:
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self : Dict , snake_case : int = 1 , snake_case : int = 100 , snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case : Optional[float] = None , snake_case : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
__UpperCAmelCase : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
__UpperCAmelCase : Tuple = audio_length_in_s * self.unet.config.sample_rate
__UpperCAmelCase : Any = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
                f'{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'
f' {3 * down_scale_factor / self.unet.config.sample_rate}.' )
__UpperCAmelCase : Optional[int] = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
__UpperCAmelCase : Optional[Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
''' process.''' )
__UpperCAmelCase : List[str] = int(lowerCamelCase_ )
__UpperCAmelCase : List[str] = next(iter(self.unet.parameters() ) ).dtype
__UpperCAmelCase : List[Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
__UpperCAmelCase : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_ , device=audio.device )
__UpperCAmelCase : List[Any] = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__UpperCAmelCase : Dict = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
__UpperCAmelCase : Optional[int] = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
__UpperCAmelCase : str = audio.clamp(-1 , 1 ).float().cpu().numpy()
__UpperCAmelCase : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
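# Usage sketch (assumptions: this class matches diffusers'
# DanceDiffusionPipeline, and "harmonai/maestro-150k" is an illustrative
# public checkpoint for it):
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
# audio = output.audios[0]  # numpy array of shape (channels, samples)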
| 718
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__UpperCAmelCase :Tuple = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
__UpperCAmelCase :List[str] = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but one that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
__UpperCAmelCase :List[Any] = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCamelCase__ ( self : Any , snake_case : Union[str, Any] , snake_case : int , snake_case : Dict=False ) -> List[Any]:
if return_pvalue:
__UpperCAmelCase : Tuple = pearsonr(snake_case , snake_case )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(snake_case , snake_case )[0] )}
| 266
| 0
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _UpperCamelCase ( A ):
UpperCamelCase_ =filter(lambda A : p.requires_grad , model.parameters() )
UpperCamelCase_ =sum([np.prod(p.size() ) for p in model_parameters] )
return params
A_ = logging.getLogger(__name__)
def _UpperCamelCase ( A , A ):
if metric == "rouge2":
UpperCamelCase_ ="{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
UpperCamelCase_ ="{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
UpperCamelCase_ ="{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
UpperCamelCase_ ="{val_avg_loss:.4f}-{step_count}"
else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"""
            " function." )
UpperCamelCase_ =ModelCheckpoint(
        dirpath=A , filename=A , monitor=f"""val_{metric}""" , mode="min" if "loss" in metric else "max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _UpperCamelCase ( A , A ):
return EarlyStopping(
monitor=f"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=A , verbose=A , )
class __lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] ):
UpperCamelCase_ ={f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCamelCase__ )
@rank_zero_only
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: pl.Trainer , UpperCamelCase_: pl.LightningModule , UpperCamelCase_: str , UpperCamelCase_: Any=True ):
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
UpperCamelCase_ =trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCamelCase_ =Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCamelCase_ =od / "test_results.txt"
UpperCamelCase_ =od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCamelCase_ =od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
UpperCamelCase_ =od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=UpperCamelCase__ )
generations_file.parent.mkdir(exist_ok=UpperCamelCase__ )
with open(UpperCamelCase__ , "a+" ) as writer:
for key in sorted(UpperCamelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCamelCase_ =metrics[key]
if isinstance(UpperCamelCase__ , torch.Tensor ):
UpperCamelCase_ =val.item()
UpperCamelCase_ =f"""{key}: {val:.6f}\n"""
writer.write(UpperCamelCase__ )
if not save_generations:
return
if "preds" in metrics:
UpperCamelCase_ ="\n".join(metrics["preds"] )
generations_file.open("w+" ).write(UpperCamelCase__ )
@rank_zero_only
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] ):
try:
UpperCamelCase_ =pl_module.model.model.num_parameters()
except AttributeError:
UpperCamelCase_ =pl_module.model.num_parameters()
UpperCamelCase_ =count_trainable_parameters(UpperCamelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase__ ( self: Any , UpperCamelCase_: pl.Trainer , UpperCamelCase_: pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCamelCase__ , UpperCamelCase__ , "test" )
@rank_zero_only
def UpperCamelCase__ ( self: int , UpperCamelCase_: pl.Trainer , UpperCamelCase_: str ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 391
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = {"vocab_file": "spiece.model"}
__A : List[Any] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A__ : Dict =3
A__ : int =do_lower_case
A__ : str =remove_space
A__ : Optional[Any] =keep_accents
A__ : int =vocab_file
A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
A__ : Union[str, Any] =jieba
A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _UpperCAmelCase ( self : Union[str, Any] ):
return len(self.sp_model )
def _UpperCAmelCase ( self : Optional[int] ):
A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
A__ : Union[str, Any] =self.__dict__.copy()
A__ : Tuple =None
return state
def __setstate__( self : Tuple , UpperCamelCase__ : int ):
A__ : Union[str, Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ : Optional[int] ={}
A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ):
if self.remove_space:
A__ : Optional[int] =" ".join(inputs.strip().split() )
else:
A__ : Optional[Any] =inputs
A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ )
A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
A__ : str =outputs.lower()
return outputs
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ )
A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
A__ : List[str] =[]
for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                A__ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : Union[str, Any] =cur_pieces[1:]
else:
A__ : List[str] =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
return self.sp_model.PieceToId(UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A__ : List[str] =[self.sep_token_id]
A__ : str =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A__ : List[str] =[self.sep_token_id]
A__ : Optional[Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A__ : Tuple =os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
A__ : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ )
A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 656
| 0
|
from __future__ import annotations
from statistics import mean
def UpperCAmelCase__ ( _A , _A , _A ):
"""simple docstring"""
a_ = [0] * no_of_processes
a_ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_A ):
a_ = burst_time[i]
a_ = []
a_ = 0
a_ = 0
    # While processes remain incomplete: every process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process, and the shortest job in ready_process (target_process)
    # is executed next.
while completed != no_of_processes:
a_ = []
a_ = -1
for i in range(_A ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_A )
if len(_A ) > 0:
a_ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
a_ = i
total_time += burst_time[target_process]
completed += 1
a_ = 0
a_ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def UpperCAmelCase__ ( _A , _A , _A ):
"""simple docstring"""
a_ = [0] * no_of_processes
for i in range(_A ):
a_ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
UpperCamelCase__ = 4
UpperCamelCase__ = [2, 5, 3, 7]
UpperCamelCase__ = [0, 0, 0, 0]
UpperCamelCase__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
UpperCamelCase__ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 143
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def UpperCAmelCase__ ( _A , _A ):
"""simple docstring"""
a_ = np.argmax(_A , axis=1 )
return np.sum(outputs == labels )
def UpperCAmelCase__ ( _A ):
"""simple docstring"""
with open(_A , encoding='''utf_8''' ) as f:
a_ = csv.reader(_A )
a_ = []
next(_A ) # skip the first line
for line in tqdm(_A ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def UpperCAmelCase__ ( _A , _A , _A , _A , _A , _A ):
"""simple docstring"""
a_ = []
for dataset in encoded_datasets:
a_ = len(_A )
a_ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
a_ = np.zeros((n_batch, 2) , dtype=np.intaa )
a_ = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
a_ = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(_A ):
a_ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a_ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a_ = with_conta
a_ = with_conta
a_ = len(_A ) - 1
a_ = len(_A ) - 1
a_ = with_conta
a_ = with_conta
a_ = mc_label
a_ = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_A ) for t in all_inputs ) )
return tensor_datasets
def UpperCAmelCase__ ( ):
"""simple docstring"""
a_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_A , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_A , type=_A , required=_A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_A , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_A , default='''''' )
parser.add_argument('''--seed''' , type=_A , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_A , default=3 )
parser.add_argument('''--train_batch_size''' , type=_A , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_A , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=_A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_A , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_A , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_A , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_A , default=6.2_5e-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_A , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_A , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_A , default=0.01 )
parser.add_argument('''--lm_coef''' , type=_A , default=0.9 )
parser.add_argument('''--n_valid''' , type=_A , default=374 )
parser.add_argument('''--server_ip''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
a_ = parser.parse_args()
print(_A )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_A )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
a_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
a_ = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_A , _A ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
a_ = ['''_start_''', '''_delimiter_''', '''_classify_''']
a_ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_A )
a_ = tokenizer.convert_tokens_to_ids(_A )
a_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_A ) )
model.to(_A )
# Load and encode the datasets
def tokenize_and_encode(_A ):
if isinstance(_A , _A ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_A ) )
elif isinstance(_A , _A ):
return obj
return [tokenize_and_encode(_A ) for o in obj]
logger.info('''Encoding dataset...''' )
a_ = load_rocstories_dataset(args.train_dataset )
a_ = load_rocstories_dataset(args.eval_dataset )
a_ = (train_dataset, eval_dataset)
a_ = tokenize_and_encode(_A )
# Compute the max input length for the Transformer
a_ = model.config.n_positions // 2 - 2
a_ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
a_ = min(_A , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
a_ = pre_process_datasets(_A , _A , _A , *_A )
a_ , a_ = tensor_datasets[0], tensor_datasets[1]
a_ = TensorDataset(*_A )
a_ = RandomSampler(_A )
a_ = DataLoader(_A , sampler=_A , batch_size=args.train_batch_size )
a_ = TensorDataset(*_A )
a_ = SequentialSampler(_A )
a_ = DataLoader(_A , sampler=_A , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
a_ = args.max_steps
a_ = args.max_steps // (len(_A ) // args.gradient_accumulation_steps) + 1
else:
a_ = len(_A ) // args.gradient_accumulation_steps * args.num_train_epochs
a_ = list(model.named_parameters() )
a_ = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
a_ = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
a_ = AdamW(_A , lr=args.learning_rate , eps=args.adam_epsilon )
a_ = get_linear_schedule_with_warmup(
_A , num_warmup_steps=args.warmup_steps , num_training_steps=_A )
if args.do_train:
a_ , a_ , a_ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
a_ = 0
a_ = 0
a_ = tqdm(_A , desc='''Training''' )
for step, batch in enumerate(_A ):
a_ = tuple(t.to(_A ) for t in batch )
a_ , a_ , a_ , a_ = batch
a_ = model(_A , mc_token_ids=_A , lm_labels=_A , mc_labels=_A )
a_ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
a_ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
a_ = '''Training loss: {:.2e} lr: {:.2e}'''.format(_A , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
a_ = model.module if hasattr(_A , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
a_ = os.path.join(args.output_dir , _A )
a_ = os.path.join(args.output_dir , _A )
torch.save(model_to_save.state_dict() , _A )
model_to_save.config.to_json_file(_A )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
a_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
a_ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_A )
if args.do_eval:
model.eval()
a_ , a_ = 0, 0
a_ , a_ = 0, 0
for batch in tqdm(_A , desc='''Evaluating''' ):
a_ = tuple(t.to(_A ) for t in batch )
a_ , a_ , a_ , a_ = batch
with torch.no_grad():
a_ , a_ , a_ , a_ = model(
_A , mc_token_ids=_A , lm_labels=_A , mc_labels=_A )
a_ = mc_logits.detach().cpu().numpy()
a_ = mc_labels.to('''cpu''' ).numpy()
a_ = accuracy(_A , _A )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
a_ = eval_loss / nb_eval_steps
a_ = eval_accuracy / nb_eval_examples
a_ = tr_loss / nb_tr_steps if args.do_train else None
a_ = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
a_ = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
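# Invocation sketch (assuming the script is saved as run_openai_gpt.py; the
# csv paths are placeholders for the ROCStories cloze-test data, not files
# shipped with the script):
# python run_openai_gpt.py \
#   --model_name openai-gpt \
#   --do_train \
#   --do_eval \
#   --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#   --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#   --output_dir ../log \
#   --train_batch_size 16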
| 143
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[Any] = """ClapFeatureExtractor"""
_SCREAMING_SNAKE_CASE : Tuple = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
__lowerCAmelCase = kwargs.pop("""sampling_rate""" , SCREAMING_SNAKE_CASE__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
__lowerCAmelCase = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if audios is not None:
__lowerCAmelCase = self.feature_extractor(
SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None and audios is not None:
            encoding["""input_features"""] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : Any , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def a ( self : Optional[Any] ) -> int:
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 427
|
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n: int, prec: int = 1_000) -> bool:
    '''simple docstring'''
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
        exp += 1
    # n - 1 = d * (2 ** exp) with d odd
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
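    # Sanity check (note the relative import above means this module must
    # live in a package alongside binary_exp_mod.py): with the default 1000
    # rounds, [i for i in range(30) if is_prime_big(i)] evaluates to
    # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] with overwhelming probability,
    # since Miller-Rabin is a randomized test.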
| 427
| 1
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int = 6 ) -> None:
"""simple docstring"""
__magic_name__ = None
__magic_name__ = None
self.create_linked_list(UpperCamelCase__ )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : int ) -> None:
"""simple docstring"""
__magic_name__ = Node()
__magic_name__ = current_node
__magic_name__ = current_node
__magic_name__ = current_node
for _ in range(1 , UpperCamelCase__ ):
__magic_name__ = Node()
__magic_name__ = current_node
__magic_name__ = previous_node
__magic_name__ = current_node
__magic_name__ = self.front
__magic_name__ = previous_node
def _lowercase ( self : Optional[int] ) -> bool:
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _lowercase ( self : Optional[int] ) -> Any | None:
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def _lowercase ( self : List[Any] , UpperCamelCase__ : Any ) -> None:
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
__magic_name__ = self.rear.next
if self.rear:
__magic_name__ = data
def _lowercase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
__magic_name__ = self.front.data
__magic_name__ = None
return data
__magic_name__ = self.front
__magic_name__ = old_front.next
__magic_name__ = old_front.data
__magic_name__ = None
return data
def _lowercase ( self : Optional[Any] ) -> None:
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""" )
def _lowercase ( self : str ) -> None:
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : int ) -> None:
"""simple docstring"""
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase_ :
'''simple docstring'''
a__ = None
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" )
feat_extract_first.to_json_file(UpperCamelCase__ )
__magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _lowercase ( self : str ) -> str:
"""simple docstring"""
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
__magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _lowercase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 76
| 0
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowercase__ : Optional[Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase__ : Optional[int] = 1_28
elif "12-12" in model_name:
lowercase__ : List[Any] = 12
lowercase__ : Union[str, Any] = 12
elif "14-14" in model_name:
lowercase__ : Any = 14
lowercase__ : Optional[int] = 14
elif "16-16" in model_name:
lowercase__ : List[Any] = 16
lowercase__ : List[str] = 16
else:
raise ValueError("Model not supported" )
lowercase__ : List[str] = "huggingface/label-files"
if "speech-commands" in model_name:
lowercase__ : Union[str, Any] = 35
lowercase__ : int = "speech-commands-v2-id2label.json"
else:
lowercase__ : List[str] = 5_27
lowercase__ : Union[str, Any] = "audioset-id2label.json"
lowercase__ : int = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,repo_type="dataset" ) ,"r" ) )
    lowercase__ : Any = {int(k ): v for k, v in idalabel.items()}
lowercase__ : List[Any] = idalabel
lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
if "module.v" in name:
lowercase__ : Optional[Any] = name.replace("module.v" ,"audio_spectrogram_transformer" )
if "cls_token" in name:
lowercase__ : List[str] = name.replace("cls_token" ,"embeddings.cls_token" )
if "dist_token" in name:
lowercase__ : Union[str, Any] = name.replace("dist_token" ,"embeddings.distillation_token" )
if "pos_embed" in name:
lowercase__ : List[str] = name.replace("pos_embed" ,"embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : Union[str, Any] = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
lowercase__ : Any = name.replace("blocks" ,"encoder.layer" )
if "attn.proj" in name:
lowercase__ : List[Any] = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name:
lowercase__ : int = name.replace("attn" ,"attention.self" )
if "norm1" in name:
lowercase__ : Union[str, Any] = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
lowercase__ : Any = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : int = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" ,"output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase__ : Any = name.replace("audio_spectrogram_transformer.norm" ,"audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
lowercase__ : Dict = name.replace("module.mlp_head.0" ,"classifier.layernorm" )
if "module.mlp_head.1" in name:
lowercase__ : Dict = name.replace("module.mlp_head.1" ,"classifier.dense" )
return name
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> str:
for key in orig_state_dict.copy().keys():
lowercase__ : Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
lowercase__ : int = key.split("." )
lowercase__ : Tuple = int(key_split[3] )
lowercase__ : str = config.hidden_size
if "weight" in key:
lowercase__ : Optional[Any] = val[:dim, :]
lowercase__ : List[str] = val[dim : dim * 2, :]
lowercase__ : Tuple = val[-dim:, :]
else:
lowercase__ : Dict = val[:dim]
lowercase__ : Any = val[dim : dim * 2]
lowercase__ : str = val[-dim:]
else:
lowercase__ : Union[str, Any] = val
return orig_state_dict
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowercase__ : int = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
lowercase__ : Any = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ )
lowercase__ : int = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
lowercase__ : Dict = model_name_to_url[model_name]
lowercase__ : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ ,map_location="cpu" )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE_ )
# rename some keys
lowercase__ : List[str] = convert_state_dict(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# load 🤗 model
lowercase__ : Optional[int] = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase__ : Any = -4.267_7393 if "speech-commands" not in model_name else -6.84_5978
lowercase__ : str = 4.568_9974 if "speech-commands" not in model_name else 5.565_4526
lowercase__ : Union[str, Any] = 10_24 if "speech-commands" not in model_name else 1_28
lowercase__ : Optional[Any] = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ ,std=SCREAMING_SNAKE_CASE_ ,max_length=SCREAMING_SNAKE_CASE_ )
if "speech-commands" in model_name:
lowercase__ : Optional[int] = load_dataset("speech_commands" ,"v0.02" ,split="validation" )
lowercase__ : str = dataset[0]["audio"]["array"]
else:
lowercase__ : Optional[int] = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" ,filename="sample_audio.flac" ,repo_type="dataset" ,)
lowercase__ , lowercase__ : List[Any] = torchaudio.load(SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[int] = waveform.squeeze().numpy()
lowercase__ : Dict = feature_extractor(SCREAMING_SNAKE_CASE_ ,sampling_rate=1_60_00 ,return_tensors="pt" )
# forward pass
lowercase__ : Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
lowercase__ : int = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase__ : Optional[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase__ : Dict = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase__ : Optional[int] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase__ : List[Any] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase__ : int = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase__ : Union[str, Any] = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase__ : List[str] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase__ : Dict = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F"""MIT/{model_name}""" )
feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
__a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__a : str = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
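    # Invocation sketch (the dump folder is a placeholder and the script
    # filename is an assumption inferred from its contents):
    # python convert_audio_spectrogram_transformer_original_to_pytorch.py \
    #   --model_name ast-finetuned-audioset-10-10-0.4593 \
    #   --pytorch_dump_folder_path ./ast-audioset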
| 397
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a : Optional[Any] = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Any = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__a : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 397
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Optional[Any] = "▁"
snake_case_ : Dict = {"vocab_file": "sentencepiece.bpe.model"}
snake_case_ : str = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
snake_case_ : str = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
snake_case_ : str = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = ["input_ids", "attention_mask"]
lowerCamelCase = []
lowerCamelCase = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
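# Illustrative sketch (not part of the original module; token ids are made up):
# how set_src_lang_special_tokens + build_inputs_with_special_tokens wrap a
# sequence, depending on `legacy_behaviour`.
def _wrap_with_lang_tokens(token_ids, lang_code_id, eos_id, legacy_behaviour=False):
    if legacy_behaviour:
        prefix, suffix = [], [eos_id, lang_code_id]  # "tokens </s> <lang>"
    else:
        prefix, suffix = [lang_code_id], [eos_id]  # "<lang> tokens </s>"
    return prefix + token_ids + suffix


assert _wrap_with_lang_tokens([10, 11], lang_code_id=5, eos_id=2) == [5, 10, 11, 2]
assert _wrap_with_lang_tokens([10, 11], 5, 2, legacy_behaviour=True) == [10, 11, 2, 5]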
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_noop(self):  # empty placeholder test preserved from the source file
        pass
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
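# Minimal framework-free sketch (an illustration, not the model code above):
# the PABEE patience rule stops inference once the per-layer prediction has
# stayed unchanged for `patience` consecutive layers.
def _pabee_exit_layer(per_layer_predictions, patience):
    """Return the 1-based layer index at which inference would stop."""
    patient_counter = 0
    previous = None
    for layer_num, prediction in enumerate(per_layer_predictions, start=1):
        if previous is not None and prediction == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            return layer_num
    return len(per_layer_predictions)  # fell through: all layers were used


# The prediction stabilizes at layer 2, so with patience=2 we exit at layer 4.
assert _pabee_exit_layer([0, 1, 1, 1, 1, 1], patience=2) == 4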
import math
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Returns the `nth` prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
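# Quick sanity check (illustrative, not in the original file): every prime
# above 3 has the form 6k - 1 or 6k + 1, which is why `is_prime` only probes
# i and i + 2 in steps of 6.
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29, 31))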
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Returns a list of the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
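# Worked example (illustrative): h(n) = n * (2n - 1), so length=5 yields
# [0, 1, 6, 15, 28] -- the leading 0 comes from range() starting at 0.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]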
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
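# Quick usage sketch (illustrative, not part of the original module): the
# defaults above reproduce PoolFormer-S12 -- the size name encodes the total
# depth across the four stages.
_cfg = PoolFormerConfig()
assert sum(_cfg.depths) == 12  # 2 + 2 + 6 + 2 encoder blocks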
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight"""))
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias"""))
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias"""))
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight"""))
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight"""))
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias"""))
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
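# Shape sketch (illustrative, tiny numbers -- not the real checkpoint): timm
# stores attention as one fused qkv matrix with 3 * hidden_size rows; the loop
# above slices it into the separate query / key / value tensors HF expects.
_hidden = 2
_qkv_rows = list(range(3 * _hidden))  # stand-in for the (3h, h) weight rows
_q = _qkv_rows[:_hidden]
_k = _qkv_rows[_hidden : 2 * _hidden]
_v = _qkv_rows[-_hidden:]
assert (_q, _k, _v) == ([0, 1], [2, 3], [4, 5])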
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
lowerCAmelCase__ : str = 384
lowerCAmelCase__ : Any = 1536
lowerCAmelCase__ : List[str] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase__ : Dict = 1024
lowerCAmelCase__ : int = 4096
lowerCAmelCase__ : Dict = 24
lowerCAmelCase__ : List[str] = 16
lowerCAmelCase__ : List[str] = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase__ : List[Any] = 4
elif "l7" in checkpoint_url:
lowerCAmelCase__ : Any = 7
lowerCAmelCase__ : Optional[int] = 1024
lowerCAmelCase__ : Optional[int] = 4096
lowerCAmelCase__ : Dict = 24
lowerCAmelCase__ : Optional[Any] = 16
lowerCAmelCase__ : Optional[Any] = 0.1
    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ : int = torch.tensor([[-1.0915, -1.4876, -1.1809]])
elif "b16" in checkpoint_url:
lowerCAmelCase__ : List[str] = torch.tensor([[14.2889, -18.9045, 11.7281]])
elif "l16" in checkpoint_url:
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[41.5028, -22.8681, 45.6475]])
elif "b4" in checkpoint_url:
lowerCAmelCase__ : Dict = torch.tensor([[-4.3868, 5.2932, -0.4137]])
else:
lowerCAmelCase__ : Optional[int] = torch.tensor([[-0.1792, -0.6465, 2.4263]])
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] ,lowerCamelCase_ ,atol=1E-4)
print(f"""Saving model to {pytorch_dump_folder_path}""")
model.save_pretrained(lowerCamelCase_)
print(f"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
'''simple docstring'''
_A : Tuple ="0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
    from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
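# Generalized sketch (illustrative; `import_with_fallback` is a hypothetical
# helper, not diffusers API) of the guard pattern repeated throughout this
# module: probe the optional dependency first and fall back to dummy objects
# that only raise a helpful error once they are actually used.
def import_with_fallback(is_available, do_real_import, do_dummy_import):
    try:
        if not is_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        do_dummy_import()
    else:
        do_real_import()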
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
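# Simplified sketch (illustrative; `MiniLazyModule` is a hypothetical stand-in
# for the real _LazyModule used above): attribute access triggers the actual
# import, so importing the package stays cheap until a symbol is touched.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)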
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : str = ConsistencyModelPipeline(**a )
__lowerCamelCase : str = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Any = self.get_dummy_inputs(a )
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
__lowerCamelCase : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
__lowerCamelCase : Tuple = 0
__lowerCamelCase : List[str] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
__lowerCamelCase : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Tuple = self.get_dummy_inputs(a )
__lowerCamelCase : str = 1
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Any = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: List[str] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
__lowerCamelCase : List[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_dummy_inputs(a )
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[str] = None
__lowerCamelCase : str = 0
__lowerCamelCase : Tuple = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
__lowerCamelCase : Optional[Any] = torch.manual_seed(a )
__lowerCamelCase : Optional[int] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
__lowerCamelCase : Optional[Any] = latents
return inputs
def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
if type(a ) == str:
__lowerCamelCase : Dict = torch.device(a )
__lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
return latents
def _snake_case ( self: str ):
__lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : int = self.get_inputs()
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_inputs()
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Dict = None
__lowerCamelCase : Union[str, Any] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_2
def _snake_case ( self: List[str] ):
__lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
__lowerCamelCase : int = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_2
def _snake_case ( self: Dict ):
__lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
__lowerCamelCase : str = 1
__lowerCamelCase : Union[str, Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : str = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """Calculates the Gregorian Easter date for a given year via Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = "will be" if year > datetime.now().year else "was"
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Returns the denominator in [numerator, digit] whose fraction has the
    longest recurring decimal cycle (Project Euler style)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
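# Worked example (illustrative): 1/7 = 0.(142857) has the longest recurring
# cycle (6 digits) among unit fractions with denominator below 10.
assert solution(1, 10) == 7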
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self, parent,
        batch_size=13, seq_length=7, is_training=True,
        use_attention_mask=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_attention_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = True
lowerCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 606
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
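
# Added usage sketch (illustrative, not part of the original module): thanks to
# `attribute_map` above, generic config names resolve to the decoder fields.
#
#     config = TrOCRConfig()
#     config.hidden_size        # 1024, aliased to config.d_model
#     config.num_hidden_layers  # 12, aliased to config.decoder_layers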
| 316
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 316
| 1
|
'''simple docstring'''
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
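
    # Added non-interactive example: for "1,-2,3,4,-1" the best contiguous
    # sum is 3 + 4 = 7.
    print(SubArray("1,-2,3,4,-1").solve_sub_array())  # -> 7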
| 511
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
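
# Added usage sketch (illustrative, not part of the original module):
# requesting backbone features by stage name also derives the matching indices.
#
#     config = FocalNetConfig(out_features=["stage2", "stage4"])
#     config.out_features  # ["stage2", "stage4"]
#     config.out_indices   # [2, 4]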
| 511
| 1
|
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
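
    # Added example: justify to a width of 16 characters.
    print(text_justification("This is an example of text justification.", 16))
    # -> ['This    is    an', 'example  of text', 'justification.  ']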
| 238
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that will dynamically pad the inputs for multiple choice received."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
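
# Added usage sketch (flag values are illustrative defaults, not taken from
# this file):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --output_dir /tmp/swag_output \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3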
| 238
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 216
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 216
| 1
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()

        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()

        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
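
# Added note: ImageGPTImageProcessor resizes each image, normalizes pixel
# values, and maps every pixel to its nearest color cluster, so a 32x32 image
# becomes a sequence of 1024 integer tokens:
#
#     processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#     input_ids = processor(image, return_tensors="pt").input_ids  # shape (1, 1024)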
| 694
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 694
| 1
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 305
|
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the two values popped from the stack & push the result
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
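    # Editor's note (illustrative, not in the original file): a quick sanity
    # check of the evaluator above, using the same space-separated format:
    #     solve("3 4 + 2 *".split(" "))   # prints the trace table, returns 14
    #     solve("5 6 9 * +".split(" "))   # returns 59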
| 305
| 1
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
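# Editor's note (illustrative): `extract_path_from_uri` strips the protocol from
# remote URIs and leaves local paths untouched, as the test above exercises:
#     extract_path_from_uri("s3://mock-s3-bucket") == "mock-s3-bucket"
#     extract_path_from_uri("./local/path") == "./local/path"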
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
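# Editor's note (illustrative sketch, not one of the original tests): the
# clobber behaviour exercised above can be reproduced standalone; `DummyFS`
# is a hypothetical stand-in filesystem class.
#     import fsspec
#     from fsspec.registry import register_implementation
#
#     class DummyFS(fsspec.AbstractFileSystem):
#         protocol = "dummy"
#
#     register_implementation("dummy", DummyFS, clobber=True)
#     assert isinstance(fsspec.filesystem("dummy"), DummyFS)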
| 60
|
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 60
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""
    Constructs a Speech2Text processor which wraps a Speech2Text feature extractor
    and a Speech2Text tokenizer into a single processor.
    """

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility: inside `as_target_processor` we forward
        # everything to the tokenizer (the "current processor").
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).'
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
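# Editor's note (illustrative): typical round trip with the processor above;
# `dummy_audio` is a hypothetical 1-second, 16 kHz waveform.
#     import numpy as np
#     from transformers import Speech2TextProcessor
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     dummy_audio = np.zeros(16_000, dtype=np.float32)
#     inputs = processor(audio=dummy_audio, sampling_rate=16_000, text="hello", return_tensors="pt")
#     # inputs now holds the log-mel features plus a "labels" key for the text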
| 243
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
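    # Editor's note (illustrative): 220 and 284 form the classic amicable pair,
    # so both are counted by `solution` above:
    #     sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220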
| 243
| 1
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
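# Editor's note (illustrative): with a 24-layer CLIP vision encoder, the mapper
# above stacks (24 + 1) // 5 == 5 BasicTransformerBlocks over the pooled CLIP
# embedding before it is layer-normed and projected to `proj_size`.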
| 135
|
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path):
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
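    # Editor's note (illustrative): typical invocations, assuming this script is
    # run from the repository root (the filename here is hypothetical):
    #     python utils/check_build.py               # checks build/lib/transformers
    #     python utils/check_build.py --check_lib   # checks the installed package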
| 135
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
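# Editor's note (illustrative): `pad_to_multiple_of` rounds the longest-in-batch
# sequence length up to a hardware-friendly multiple; with pad_to_multiple_of=8,
# a batch whose longest sequence has 27 tokens is padded to 32.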
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location of where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 248
|
"""simple docstring"""
import baseaa
def baseaa_encode(string: str) -> bytes:
    return baseaa.baaencode(string.encode("utf-8"))


def baseaa_decode(encoded: bytes) -> str:
    return baseaa.baadecode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = baseaa_encode(test)
    print(encoded)

    decoded = baseaa_decode(encoded)
    print(decoded)
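    # Editor's note (illustrative): whatever the underlying codec, the pair above
    # is a lossless round trip for UTF-8 text:
    #     assert baseaa_decode(baseaa_encode(test)) == test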
| 248
| 1
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
A : Union[str, Any] = ["gpt2"]
A : List[str] = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
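    # Editor's note (illustrative): ModelToSave bundles the in-graph tokenizer and
    # the LM into one tf.Module, so the exported SavedModel accepts raw strings:
    #     model = ModelToSave(tokenizer=TFGPTaTokenizer.from_pretrained("gpt2"))
    #     logits = model.serving(tf.constant(["hello world"]))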
@require_tf
@require_keras_nlp
class TFGPTaTokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_tokenization(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123_123

            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 219
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1_024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language-code token of the target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
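    # Editor's note (illustrative): with a single <lang:..> prefix token and a
    # 3-token sequence, the mask above is [1, 0, 0, 0, 1]; the ones mark the
    # language prefix and the trailing eos, the zeros mark ordinary tokens.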
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 405
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
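    # Editor's note (illustrative): after this registration, an import such as
    #     from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseTokenizer
    # only loads the underlying submodule on first attribute access.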
| 210
|
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file (one token per line) into an ordered dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
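# Editor's note (illustrative): a vocab file containing the two lines
# "[PAD]\n[CLS]\n" yields OrderedDict([("[PAD]", 0), ("[CLS]", 1)]).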
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
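    # Editor's note (illustrative): with fairseq_offset = 12, spm piece id 3
    # (the first "real" token ",") maps to embedding-vocab id 3 + 12 = 15,
    # matching the alignment table in __init__ above.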
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 210
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
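# Editor's note (illustrative): the defaults above describe the 3-stage CvT-13
# architecture, e.g.:
#     config = CvtConfig()
#     assert config.embed_dim == [64, 192, 384]
#     assert config.depth == [1, 2, 10]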
| 87
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
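# Editor's note (illustrative): a sample key rewritten by rename_key above:
#     "encoder.blocks.0.attn.proj.weight"
#     -> "videomae.encoder.layer.0.attention.output.dense.weight"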
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
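# Editor's note (illustrative): `prepare_video` returns the sample clip as a
# list of per-frame numpy arrays, ready to be passed to the image processor.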
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> List[Any]:
a = get_videomae_config(__UpperCamelCase)
if "finetuned" in model_name:
a = VideoMAEForVideoClassification(__UpperCamelCase)
else:
a = VideoMAEForPreTraining(__UpperCamelCase)
# download original checkpoint, hosted on Google Drive
a = "pytorch_model.bin"
gdown.cached_download(__UpperCamelCase , __UpperCamelCase , quiet=__UpperCamelCase)
a = torch.load(__UpperCamelCase , map_location="cpu")
if "model" in files:
a = files["model"]
else:
a = files["module"]
a = convert_state_dict(__UpperCamelCase , __UpperCamelCase)
model.load_state_dict(__UpperCamelCase)
model.eval()
# verify model on basic input
a = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
a = prepare_video()
a = image_processor(__UpperCamelCase , return_tensors="pt")
if "finetuned" not in model_name:
a = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt")
a = torch.load(__UpperCamelCase)
a = model(**__UpperCamelCase)
a = outputs.logits
a = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
a = torch.Size([1, 4_00])
a = torch.tensor([-0.9_291, -0.4_061, -0.9_307])
elif model_name == "videomae-small-finetuned-ssv2":
a = torch.Size([1, 1_74])
a = torch.tensor([0.2_671, -0.4_689, -0.8_235])
elif model_name == "videomae-base":
a = torch.Size([1, 14_08, 15_36])
a = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]])
elif model_name == "videomae-base-short":
a = torch.Size([1, 14_08, 15_36])
a = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]])
# we verified the loss both for normalized and unnormalized targets for this one
a = torch.tensor([0.5_142]) if config.norm_pix_loss else torch.tensor([0.6_469])
elif model_name == "videomae-large":
a = torch.Size([1, 14_08, 15_36])
a = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]])
elif model_name == "videomae-large-finetuned-kinetics":
a = torch.Size([1, 4_00])
a = torch.tensor([0.0_771, 0.0_011, -0.3_625])
elif model_name == "videomae-huge-finetuned-kinetics":
a = torch.Size([1, 4_00])
a = torch.tensor([0.2_433, 0.1_632, -0.4_894])
elif model_name == "videomae-base-short-finetuned-kinetics":
a = torch.Size([1, 4_00])
a = torch.tensor([0.6_588, 0.0_990, -0.2_493])
elif model_name == "videomae-base-finetuned-kinetics":
a = torch.Size([1, 4_00])
a = torch.tensor([0.3_669, -0.0_688, -0.2_421])
elif model_name == "videomae-base-short-ssv2":
a = torch.Size([1, 14_08, 15_36])
a = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]])
elif model_name == "videomae-base-short-finetuned-ssv2":
a = torch.Size([1, 1_74])
a = torch.tensor([-0.0_537, -0.1_539, -0.3_266])
elif model_name == "videomae-base-ssv2":
a = torch.Size([1, 14_08, 15_36])
a = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]])
elif model_name == "videomae-base-finetuned-ssv2":
a = torch.Size([1, 1_74])
a = torch.tensor([0.1_961, -0.8_337, -0.6_389])
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''')
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-4)
else:
print("Logits:" , logits[0, :3, :3])
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1e-4)
print("Logits ok!")
# verify loss, if applicable
if model_name == "videomae-base-short":
a = outputs.loss
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-4)
print("Loss ok!")
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(__UpperCamelCase)
model.save_pretrained(__UpperCamelCase)
if push_to_hub:
print("Pushing to the hub...")
model.push_to_hub(__UpperCamelCase , organization="nielsr")
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowercase__ : Any = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 515
| 0
|
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
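# Worked example (illustrative, not part of the original script): for LLaMA-2 70B,
# dim = 8192 with ffn_dim_multiplier = 1.3 and multiple_of = 4096 rounds 8 * dim / 3
# up to the next multiple of 4096:
#   compute_intermediate_size(8192, 1.3, 4096)
#   = 4096 * ((int(1.3 * int(8 * 8192 / 3)) + 4095) // 4096) = 28672
# which matches the "70B" entry in INTERMEDIATE_SIZE_MAP above.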
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
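    # Note (illustrative): `permute` reorders rows so that the interleaved rotary
    # layout of the original checkpoint (adjacent dim pairs) matches the half-split
    # layout expected by the Hugging Face implementation. For a single head with
    # dim1 = dim2 = 4, e.g. permute(torch.arange(16.).view(4, 4), n_heads=1, dim1=4, dim2=4)
    # reorders rows (0, 1, 2, 3) -> (0, 2, 1, 3): even-indexed rows first, then odd.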
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
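# For reference (sketch only, values illustrative): the pytorch_model.bin.index.json
# written above maps every parameter name to the shard file that stores it, e.g.
#   {"metadata": {"total_size": <param_count * 2 bytes>},
#    "weight_map": {"model.embed_tokens.weight": "pytorch_model-33-of-33.bin", ...}}
# (33 shards corresponds to a 32-layer model: one file per layer plus one for the
# embeddings, final norm and LM head).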
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 145
|
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
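# Why the two schedulers agree (sketch): DDPM and DDIM share the same forward process,
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so with identical betas, images, noise and timesteps, `add_noise` produces identical
# training batches, and the identically seeded models receive identical gradients.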
| 145
| 1
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
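# Note (sketch): on the 7x7 grid above, bidirectional BFS grows two frontiers of
# radius roughly d/2 instead of one of radius d, so with branching factor b it
# visits on the order of 2 * b^(d/2) nodes versus b^d for plain BFS, which is
# where the timing difference printed below comes from.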
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 343
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
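# Hypothetical usage sketch (names are illustrative, not from the benchmark itself):
#   @get_duration
#   def bench_read(dataset):
#       for _ in dataset:
#           pass
#   seconds = bench_read(my_dataset)  # returns the elapsed time, not the result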
def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
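# Hypothetical example input (feature names are illustrative):
#   features = datasets.Features({
#       "text": datasets.Value("string"),
#       "vec": datasets.Sequence(datasets.Value("float32")),
#   })
#   generate_examples(features, num_examples=2, seq_shapes={"vec": (8,)})
# yields [(0, {"text": "...", "vec": <array of shape (8,)>}), (1, {...})].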
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
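# Minimal end-to-end sketch (illustrative; assumes a writable ./demo.arrow path):
#   features = datasets.Features({"text": datasets.Value("string")})
#   dataset = generate_example_dataset("./demo.arrow", features, num_examples=10)
#   assert len(dataset) == 10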
| 343
| 1
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
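# Quick check of the contract above (illustrative): a 127x70 RGB PIL image is
# snapped down to 96x64 (the nearest multiples of 32) and mapped from [0, 255]
# into [-1, 1] as a (1, 3, 64, 96) float tensor.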
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
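# Hypothetical usage sketch (the checkpoint id is illustrative, not pinned by this file):
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]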
| 432
|
'''simple docstring'''
lowerCAmelCase : Optional[Any] = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase : Optional[int] = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 432
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 213
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
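# Special-token layout produced by the two helpers above (CamemBERT follows the
# RoBERTa convention):
#   single sequence:   <s> X </s>
#   pair of sequences: <s> A </s></s> B </s>
# and create_token_type_ids_from_sequences returns all zeros in both cases.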
| 593
| 0
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 716
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 193
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of one another.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
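    # Note (sketch): the branches above share the [1, 2] prefix and are satisfied
    # disjunctively; update() advances the shared sequence one token at a time,
    # reset() rolls it back to the start, and remaining() reports how many more
    # tokens could still be needed before the constraint is fulfilled.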
| 74
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 611
| 0
|
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
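# Worked example (illustrative): find_mod_inverse(7, 26) == 15, since
# 7 * 15 = 105 = 4 * 26 + 1, i.e. 7 * 15 % 26 == 1.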
| 712
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
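# Hypothetical usage sketch (the repo id is illustrative):
#   config = get_image_processor_config("google/vit-base-patch16-224")
#   config.get("image_processor_type")  # e.g. "ViTImageProcessor"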
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
| 611
| 0
|
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """For each index, the length of the longest substring starting there that is
    also a prefix of the whole string."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match at position i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
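# Worked example (illustrative): z_function("abracadabra") returns
# [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]: position 7 ("abra...") matches a prefix of
# length 4, and find_pattern("abra", "abracadabra") counts 2 occurrences.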
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance magnitude (pass the
    unknown one as 0), compute the missing quantity from |Z|^2 = R^2 + X^2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 563
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Tuple:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _SCREAMING_SNAKE_CASE ( self : int )-> str:
"""simple docstring"""
UpperCamelCase = 1
UpperCamelCase = 3
UpperCamelCase = (32, 32)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase_ )
return image
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Any:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : str )-> List[Any]:
"""simple docstring"""
        def extract(*args, **kwargs):
class __a :
def __init__( self : Any )-> int:
"""simple docstring"""
UpperCamelCase = torch.ones([0] )
def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : Optional[Any] )-> List[str]:
"""simple docstring"""
self.pixel_values.to(UpperCAmelCase_ )
return self
return Out()
return extract
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCamelCase = 77
UpperCamelCase = self.dummy_image.to(UpperCAmelCase_ )
UpperCamelCase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
UpperCamelCase = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase_ )
UpperCamelCase = alt_pipe.to(UpperCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase = "A painting of a squirrel eating a burger"
UpperCamelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
UpperCamelCase = alt_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase_ , )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
UpperCamelCase = alt_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCamelCase = 77
UpperCamelCase = self.dummy_image.to(UpperCAmelCase_ )
# put models in fp16
UpperCamelCase = unet.half()
UpperCamelCase = vae.half()
UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase_ )
UpperCamelCase = alt_pipe.to(UpperCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase = "A painting of a squirrel eating a burger"
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = alt_pipe(
[prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase = init_image.resize((760, 504) )
UpperCamelCase = "BAAI/AltDiffusion"
UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase = "A fantasy landscape, trending on artstation"
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase_ , output_type="np" , )
UpperCamelCase = output.images[0]
UpperCamelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
UpperCamelCase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> List[Any]:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Any:
"""simple docstring"""
UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCamelCase = init_image.resize((768, 512) )
UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
UpperCamelCase = "BAAI/AltDiffusion"
UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase = "A fantasy landscape, trending on artstation"
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase_ , output_type="np" , )
UpperCamelCase = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 556
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCamelCase = inspect.getfile(accelerate.test_utils )
UpperCamelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
UpperCamelCase = test_metrics
@require_cpu
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> List[str]:
"""simple docstring"""
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> str:
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
print(f"Found {torch.cuda.device_count()} devices." )
UpperCamelCase = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy() )
| 556
| 1
|
'''simple docstring'''
from manim import *
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = Rectangle(height=0.5 , width=0.5 )
snake_case = Rectangle(height=0.25 , width=0.25 )
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case = [mem.copy() for i in range(6 )]
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""CPU""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
snake_case = [mem.copy() for i in range(4 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""GPU""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Model""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
snake_case = []
snake_case = []
snake_case = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
snake_case = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case , *__snake_case )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(__snake_case )
snake_case = []
snake_case = []
for i, rect in enumerate(__snake_case ):
snake_case = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
ckpt_arr.append(__snake_case )
snake_case = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case )
snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
snake_case = MarkupText(
f'''<span fgcolor='{BLUE}'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__snake_case )
snake_case = MarkupText(
f'''Based on the passed-in configuration, weights are stored in\na variety of np.memmaps on disk or on a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
snake_case = [meta_mem.copy() for i in range(6 )]
snake_case = [meta_mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Disk""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__snake_case , run_time=3 ) , Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
snake_case = []
for i, rect in enumerate(__snake_case ):
snake_case = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(FadeOut(__snake_case ) )
snake_case = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case , run_time=3 ) )
self.play(
FadeOut(__snake_case , __snake_case , *__snake_case , *__snake_case ) , )
self.wait()
| 369
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "LayoutLMv2ImageProcessor"
snake_case_ = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : List[Any] , __snake_case : Union[str, Any]=None , __snake_case : Optional[int]=None , **__snake_case : Union[str, Any] )-> str:
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __snake_case , )
snake_case = kwargs.pop("""feature_extractor""" )
snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__snake_case , __snake_case )
def __call__( self : Optional[Any] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : List[str] , )-> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
snake_case = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
snake_case = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case = features["""words"""]
snake_case = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
snake_case = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
snake_case = self.get_overflowing_images(__snake_case , encoded_inputs["""overflow_to_sample_mapping"""] )
snake_case = images
return encoded_inputs
def lowerCAmelCase ( self : Any , __snake_case : List[Any] , __snake_case : str )-> List[str]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
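# e.g. overflow_to_sample_mapping == [0, 0, 1] duplicates image 0 so that each chunk of overflowing tokens keeps its pixel values (illustrative)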
snake_case = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def lowerCAmelCase ( self : int , *__snake_case : Optional[int] , **__snake_case : Tuple )-> str:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Dict , *__snake_case : Tuple , **__snake_case : Optional[int] )-> List[Any]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def lowerCAmelCase ( self : str )-> int:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowerCAmelCase ( self : Union[str, Any] )-> List[str]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __snake_case , )
return self.image_processor_class
@property
def lowerCAmelCase ( self : Optional[Any] )-> Tuple:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __snake_case , )
return self.image_processor
| 369
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=30 , __lowercase=2 , __lowercase=3 , __lowercase=True , __lowercase=True , __lowercase=32 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=10 , __lowercase=0.02 , __lowercase=3 , __lowercase=None , ):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ = (image_size // patch_size) ** 2
UpperCAmelCase__ = num_patches + 1
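# with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 226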
def A__ ( self ):
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def A__ ( self , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = TFViTModel(config=__lowercase )
UpperCAmelCase__ = model(__lowercase , training=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image whose size differs from the one specified in the config.
UpperCAmelCase__ = self.image_size // 2
UpperCAmelCase__ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase__ = model(__lowercase , interpolate_pos_encoding=__lowercase , training=__lowercase )
UpperCAmelCase__ = (image_size // self.patch_size) ** 2 + 1
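# here the crop is 30 // 2 = 15 pixels, giving (15 // 2) ** 2 + 1 = 50 positions after interpolation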
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def A__ ( self , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = TFViTForImageClassification(__lowercase )
UpperCAmelCase__ = model(__lowercase , labels=__lowercase , training=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image whose size differs from the one specified in the config.
UpperCAmelCase__ = self.image_size // 2
UpperCAmelCase__ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase__ = model(__lowercase , interpolate_pos_encoding=__lowercase , training=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = TFViTForImageClassification(__lowercase )
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ):
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__lowercase : Tuple = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
__lowercase : Optional[int] = False
__lowercase : Optional[int] = False
__lowercase : Dict = False
def A__ ( self ):
UpperCAmelCase__ = TFViTModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A__ ( self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , tf.keras.layers.Layer ) )
def A__ ( self ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__lowercase )
UpperCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def A__ ( self ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def A__ ( self ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def A__ ( self ):
UpperCAmelCase__ = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(__lowercase )
def snake_case__ ( ) ->str:
UpperCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase__ = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=__lowercase , return_tensors="""tf""" )
# forward pass
UpperCAmelCase__ = model(**__lowercase )
# verify the logits
UpperCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
UpperCAmelCase__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowercase , atol=1e-4 )
| 422
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="shi-labs/oneformer_demo" ) ->List[str]:
with open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) as f:
UpperCAmelCase__ = json.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for key, info in class_info.items():
UpperCAmelCase__ = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase__ = thing_ids
UpperCAmelCase__ = class_names
return metadata
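# sketch of the structure built above: metadata maps each class id to its name and also carries the "thing_ids" and "class_names" lists that OneFormerImageProcessor consumes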
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=30 , __lowercase=400 , __lowercase=None , __lowercase=True , __lowercase=True , __lowercase=[0.5, 0.5, 0.5] , __lowercase=[0.5, 0.5, 0.5] , __lowercase=10 , __lowercase=False , __lowercase=255 , __lowercase="shi-labs/oneformer_demo" , __lowercase="ade20k_panoptic.json" , __lowercase=10 , ):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = min_resolution
UpperCAmelCase__ = max_resolution
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean
UpperCAmelCase__ = image_std
UpperCAmelCase__ = class_info_file
UpperCAmelCase__ = prepare_metadata(__lowercase , __lowercase )
UpperCAmelCase__ = num_text
UpperCAmelCase__ = repo_path
# for the post_process_functions
UpperCAmelCase__ = 2
UpperCAmelCase__ = 10
UpperCAmelCase__ = 10
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = do_reduce_labels
UpperCAmelCase__ = ignore_index
def A__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , __lowercase , __lowercase=False ):
if not batched:
UpperCAmelCase__ = image_inputs[0]
if isinstance(__lowercase , Image.Image ):
UpperCAmelCase__ , UpperCAmelCase__ = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ = int(self.size["""shortest_edge"""] * h / w )
UpperCAmelCase__ = self.size["""shortest_edge"""]
elif w > h:
UpperCAmelCase__ = self.size["""shortest_edge"""]
UpperCAmelCase__ = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCAmelCase__ = self.size["""shortest_edge"""]
UpperCAmelCase__ = self.size["""shortest_edge"""]
else:
UpperCAmelCase__ = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ = max(__lowercase , key=lambda __lowercase : item[0] )[0]
UpperCAmelCase__ = max(__lowercase , key=lambda __lowercase : item[1] )[1]
return expected_height, expected_width
def A__ ( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _UpperCamelCase ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__lowercase : Tuple = image_processing_class
def A__ ( self ):
UpperCAmelCase__ = OneFormerImageProcessorTester(self )
@property
def A__ ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ):
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , """image_mean""" ) )
self.assertTrue(hasattr(__lowercase , """image_std""" ) )
self.assertTrue(hasattr(__lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowercase , """do_resize""" ) )
self.assertTrue(hasattr(__lowercase , """size""" ) )
self.assertTrue(hasattr(__lowercase , """ignore_index""" ) )
self.assertTrue(hasattr(__lowercase , """class_info_file""" ) )
self.assertTrue(hasattr(__lowercase , """num_text""" ) )
self.assertTrue(hasattr(__lowercase , """repo_path""" ) )
self.assertTrue(hasattr(__lowercase , """metadata""" ) )
self.assertTrue(hasattr(__lowercase , """do_reduce_labels""" ) )
def A__ ( self ):
pass
def A__ ( self ):
# Initialize image_processor
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
UpperCAmelCase__ = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
UpperCAmelCase__ = image_processor(
__lowercase , ["""semantic"""] * len(__lowercase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ):
# Initialize image_processor
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
UpperCAmelCase__ = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
UpperCAmelCase__ = image_processor(
__lowercase , ["""semantic"""] * len(__lowercase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ):
# Initialize image_processor
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
UpperCAmelCase__ = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
UpperCAmelCase__ = image_processor(
__lowercase , ["""semantic"""] * len(__lowercase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , __lowercase=False , __lowercase=False , __lowercase="np" ):
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase__ = self.image_processing_tester.num_labels
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase )
if with_segmentation_maps:
UpperCAmelCase__ = num_labels
if is_instance_map:
UpperCAmelCase__ = list(range(__lowercase ) ) * 2
UpperCAmelCase__ = dict(enumerate(__lowercase ) )
UpperCAmelCase__ = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase__ = [Image.fromarray(__lowercase ) for annotation in annotations]
UpperCAmelCase__ = image_processor(
__lowercase , ["""semantic"""] * len(__lowercase ) , __lowercase , return_tensors="""pt""" , instance_id_to_semantic_id=__lowercase , pad_and_return_pixel_mask=__lowercase , )
return inputs
def A__ ( self ):
pass
def A__ ( self ):
def common(__lowercase=False , __lowercase=None ):
UpperCAmelCase__ = self.comm_get_image_processor_inputs(
with_segmentation_maps=__lowercase , is_instance_map=__lowercase , segmentation_type=__lowercase )
UpperCAmelCase__ = inputs["""mask_labels"""]
UpperCAmelCase__ = inputs["""class_labels"""]
UpperCAmelCase__ = inputs["""pixel_values"""]
UpperCAmelCase__ = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(__lowercase , __lowercase , __lowercase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(__lowercase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=__lowercase )
common(is_instance_map=__lowercase , segmentation_type="""pil""" )
common(is_instance_map=__lowercase , segmentation_type="""pil""" )
def A__ ( self ):
UpperCAmelCase__ = np.zeros((20, 50) )
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
UpperCAmelCase__ = binary_mask_to_rle(__lowercase )
self.assertEqual(len(__lowercase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
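# assumed reading of the format: binary_mask_to_rle emits alternating (start, length) pairs for runs of 1s in the flattened mask, so rle[0] == 21 is the first run's start and rle[1] == 45 its length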
def A__ ( self ):
UpperCAmelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ = feature_extractor.post_process_semantic_segmentation(__lowercase )
self.assertEqual(len(__lowercase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase__ = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
UpperCAmelCase__ = feature_extractor.post_process_semantic_segmentation(__lowercase , target_sizes=__lowercase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ):
UpperCAmelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ = image_processor.post_process_instance_segmentation(__lowercase , threshold=0 )
self.assertTrue(len(__lowercase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , __lowercase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ):
UpperCAmelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ = image_processor.post_process_panoptic_segmentation(__lowercase , threshold=0 )
self.assertTrue(len(__lowercase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , __lowercase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 422
| 1
|
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase_ = "true"
def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Any=82 ,__UpperCamelCase: Union[str, Any]=16 ):
"""simple docstring"""
set_seed(42 )
SCREAMING_SNAKE_CASE : List[Any] = RegressionModel()
SCREAMING_SNAKE_CASE : List[str] = deepcopy(__UpperCamelCase )
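# deepcopy keeps the two models identical before wrapping, so the distributed run can be compared against a plain-PyTorch baseline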
SCREAMING_SNAKE_CASE : Union[str, Any] = RegressionDataset(length=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase )
model.to(accelerator.device )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return model, ddp_model, dataloader
def lowercase__( __UpperCamelCase: Accelerator ,__UpperCamelCase: Tuple=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset('glue' ,'mrpc' ,split='validation' )
def tokenize_function(__UpperCamelCase: str ):
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,)
SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column('label' ,'labels' )
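# note: the default collate below pads to a fixed max_length so every process sees identically shaped batches; 'longest' pads per batch instead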
def collate_fn(__UpperCamelCase: List[Any] ):
if use_longest:
return tokenizer.pad(__UpperCamelCase ,padding='longest' ,return_tensors='pt' )
return tokenizer.pad(__UpperCamelCase ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' )
return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 )
def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataloader(__UpperCamelCase ,not dispatch_batches )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' ,return_dict=__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: Optional[Any] ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(__UpperCamelCase )
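# gather_for_metrics gathers across processes and drops the duplicate samples added to even out the last batch, so sample counts match a single-process run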
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCamelCase )
targs.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase )
return logits, targs
def lowercase__( __UpperCamelCase: Accelerator ,__UpperCamelCase: int=82 ,__UpperCamelCase: List[Any]=False ,__UpperCamelCase: Optional[Any]=False ,__UpperCamelCase: str=16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
assert (
len(__UpperCamelCase ) == num_samples
), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}"
def lowercase__( __UpperCamelCase: bool = False ,__UpperCamelCase: bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load('glue' ,'mrpc' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase )
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = setup['no']
model.to(__UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(__UpperCamelCase )
with torch.inference_mode():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__UpperCamelCase ,references=batch['labels'] )
SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE : List[str] = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE : Tuple = batch['labels']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower, so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(__UpperCamelCase ,__UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE : Dict = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(__UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator()
test_torch_metrics(__UpperCamelCase ,5_12 )
accelerator.state._reset_state()
def lowercase__( __UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 28
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
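# model offload keeps submodules on the CPU and moves each to the GPU only while it runs, cutting peak VRAM at some speed cost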
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
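# DiffEdit proceeds in three stages below: generate_mask contrasts the source and target prompts, invert encodes the image into latents, and the final pipe call inpaints inside the mask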
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 28
| 1
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase : List[str] = HfApi()
lowerCAmelCase : Tuple = {}
# fmt: off
lowerCAmelCase : List[str] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
lowerCAmelCase : Dict = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
lowerCAmelCase : str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
lowerCAmelCase : List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
lowerCAmelCase : int = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
lowerCAmelCase : int = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
lowerCAmelCase : List[str] = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
lowerCAmelCase : Optional[int] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
lowerCAmelCase : Any = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
lowerCAmelCase : str = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
lowerCAmelCase : List[str] = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
lowerCAmelCase : Optional[Any] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
lowerCAmelCase : int = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
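# Usage sketch outside the test harness (illustrative, mirrors the slow test
# above; the checkpoint download is large):
#
#   model_id = "google/ncsnpp-church-256"
#   sde_ve = ScoreSdeVePipeline(
#       unet=UNet2DModel.from_pretrained(model_id),
#       scheduler=ScoreSdeVeScheduler.from_pretrained(model_id),
#   )
#   image = sde_ve(num_inference_steps=10, output_type="numpy").images[0]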
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    r"""Image processor that rescales pixel values and pads images to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        # Pad the bottom and right edges so both dimensions become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
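# Usage sketch (illustrative; run from an environment with transformers
# installed, since this module uses relative imports):
#
#   import numpy as np
#   from transformers import Swin2SRImageProcessor
#
#   processor = Swin2SRImageProcessor(do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8)
#   dummy = np.random.randint(0, 256, size=(3, 37, 53), dtype=np.uint8)
#   batch = processor(images=dummy, return_tensors="np")
#   # 37 -> 40 and 53 -> 56: both spatial dimensions are padded up to a multiple of 8.
#   print(batch["pixel_values"].shape)  # (1, 3, 40, 56)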
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""Tokenizer for the DPR context encoder; identical to BertTokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""Tokenizer for the DPR question encoder; identical to BertTokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""Tokenizer for the DPR reader; combines the custom reader mixin with BertTokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
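# Usage sketch (illustrative checkpoint; network access required):
#
#   from transformers import DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles=["Haddaway", "What Is Love (disambiguation)"],
#       texts=["'What Is Love' is a 1993 eurodance song.", "Several songs share this title."],
#       padding=True,
#       return_tensors="pt",
#   )
#   print(encoded["input_ids"].shape)  # (2, sequence_length): one row per passage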
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    r"""Configuration class for the Conditional DETR model."""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a dict, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
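# Usage sketch (illustrative): instantiate a smaller-than-default config and
# round-trip it through to_dict(); `model_type` is injected during serialization.
#
#   from transformers import ConditionalDetrConfig
#
#   config = ConditionalDetrConfig(decoder_layers=2, num_queries=100)
#   d = config.to_dict()
#   print(d["model_type"], d["decoder_layers"], d["num_queries"])  # conditional_detr 2 100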
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
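# Example input format (illustrative path and test names): each line of
# --correct_filename must be "<file>;<class name>;<test name>;<correct line>",
# matching the split(";") unpacking in main(), e.g.
#
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([...])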
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean), (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean), (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    # The desired dimensionality must be strictly smaller than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes), covariance_within_classes(features, labels, classes), )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
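# Usage sketch on made-up data: each column is a sample, each row a feature;
# PCA keeps the top-2 directions of variance.
def demo_principal_component_analysis() -> None:
    features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]])
    projected = principal_component_analysis(features, dimensions=2)
    print(projected.shape)  # (2, 4): two components for each of the four samples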
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # Halve the attention head dim to reduce peak memory.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Transcribe the audio with Whisper and use the transcript as the prompt.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
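# Usage sketch (illustrative checkpoints; heavy downloads, GPU recommended):
#
#   import librosa
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#   audio, sampling_rate = librosa.load("speech.wav", sr=16_000)
#   speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#   speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#   # The remaining components (vae, text_encoder, tokenizer, unet, scheduler,
#   # safety_checker, feature_extractor) come from a Stable Diffusion checkpoint.
#   pipe = SpeechToImagePipeline(speech_model, speech_processor, vae, text_encoder,
#                                tokenizer, unet, scheduler, safety_checker, feature_extractor)
#   image = pipe(audio, sampling_rate=sampling_rate).images[0]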
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ : str = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `lowerCamelCase_` above holds the expected encoding for the integration check.
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510", )
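# Quick reference (illustrative; mirrors the slow tests above): the public
# checkpoint wraps inputs with [CLS]=65 and [SEP]=66.
#
#   from transformers import BigBirdTokenizer
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   print(tokenizer("Hello World!").input_ids)  # [65, 18536, 2260, 101, 66]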
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # `normalizers` is provided by `from tokenizers import normalizers` in the file header.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """simple docstring"""
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
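# Usage sketch for `batch_encode_candidates` above (checkpoint name is real;
# shapes are illustrative): each example carries several candidate texts, and
# every candidate is padded to `max_length`, so the result stacks into a
# [batch_size, num_candidates, max_length] tensor.
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     batch = tokenizer.batch_encode_candidates(
#         [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="np"
#     )
#     batch.input_ids.shape  # -> (1, 2, 10)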
| 144
| 0
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    '''simple docstring'''
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method
    def wrapper(self, *args, **kwargs):
        # Run accelerate's pre-forward hook (if one is attached) before the method.
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)
    return wrapper
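# Minimal sketch of the decorator in use (the class below is hypothetical):
# on accelerate >= 0.17.0, any `_hf_hook` attached by accelerate's offloading
# utilities has its `pre_forward` callback run before the wrapped method, so
# offloaded weights are moved onto the execution device just in time.
class _OffloadAwareSketch:
    @apply_forward_hook
    def encode(self, x):
        return x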
| 438
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        '''simple docstring'''
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        # mean per-token cross-entropy times sequence length gives the total
        # negative log-likelihood, comparable to the reference score below
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 438
| 1
|
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE ( _A):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase =Rectangle(height=0.25 , width=0.25 )
_lowerCAmelCase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('CPU' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
_lowerCAmelCase =[mem.copy() for i in range(4 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('GPU' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase__ )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('Model' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase__ )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i, rect in enumerate(UpperCamelCase__ ):
rect.set_stroke(UpperCamelCase__ )
_lowerCAmelCase =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCamelCase__ , buff=0.0 )
self.add(UpperCamelCase__ )
model_cpu_arr.append(UpperCamelCase__ )
self.add(*UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('Loaded Checkpoint' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCamelCase__ )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i, rect in enumerate(UpperCamelCase__ ):
_lowerCAmelCase =fill.copy().set_fill(UpperCamelCase__ , opacity=0.7 )
target.move_to(UpperCamelCase__ )
ckpt_arr.append(UpperCamelCase__ )
_lowerCAmelCase =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCamelCase__ )
self.add(*UpperCamelCase__ , *UpperCamelCase__ )
_lowerCAmelCase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase =MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase__ , UpperCamelCase__ )
_lowerCAmelCase =MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(UpperCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCamelCase__ )
_lowerCAmelCase =MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
_lowerCAmelCase =[meta_mem.copy() for i in range(6 )]
_lowerCAmelCase =[meta_mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('Disk' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCamelCase__ , run_time=3 ) , Write(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) )
_lowerCAmelCase =[]
for i, rect in enumerate(UpperCamelCase__ ):
_lowerCAmelCase =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(FadeOut(UpperCamelCase__ ) )
_lowerCAmelCase =MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ , run_time=3 ) )
self.play(
FadeOut(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ ) , )
self.wait()
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
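# Usage note (illustrative, not part of the original module): with _LazyModule
# in place, `from transformers.models.vit_mae import ViTMAEModel` resolves the
# symbol on first attribute access, so importing the package stays cheap until
# a torch- or TensorFlow-specific name is actually requested.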
| 58
| 0
|
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
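# Cross-check sketch (helper added for illustration): both sums have closed
# forms, sum(i) = n(n + 1)/2 and sum(i**2) = n(n + 1)(2n + 1)/6, so the same
# answer falls out without a loop.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution_closed_form(10) == solution(10) == 2640  # 55**2 - 385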
if __name__ == "__main__":
print(F'{solution() = }')
| 100
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """simple docstring"""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
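# Sanity sketch: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
assert solution(13195) == 29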
if __name__ == "__main__":
print(F'''{solution() = }''')
| 554
| 0
|
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """simple docstring"""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
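# Sanity sketch: with the default 18 series terms the approximation should sit
# well within rounding distance of the exact value sin(30 degrees) = 0.5.
assert abs(sin(30.0) - 0.5) < 1e-9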
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 614
|
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
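# Agreement sketch: the brute-force and two-pointer strategies return the same
# sorted triplet on a small hand-checked case (1 + 3 + 5 == 9).
assert triplet_sum1([1, 2, 3, 4, 5], 9) == triplet_sum2([1, 2, 3, 4, 5], 9) == (1, 3, 5)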
def solution_times() -> tuple[float, float]:
    """simple docstring"""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 614
| 1
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """simple docstring"""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """simple docstring"""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """simple docstring"""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """simple docstring"""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """simple docstring"""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """simple docstring"""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """simple docstring"""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """simple docstring"""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """simple docstring"""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
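# Spot-check sketch: for 4 sides the general regular-polygon formula collapses
# to the square formula, since tan(pi / 4) == 1.
assert abs(area_reg_polygon(4, 10) - area_square(10)) < 1e-9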
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print("""\nSurface Areas of various geometric shapes: \n""")
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 563
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : List[str] = ["""input_values""", """attention_mask"""]
def __init__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1_6_0_0_0 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = False , lowerCAmelCase__ = 8_0 , lowerCAmelCase__ = 1_6 , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = "hann_window" , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = 8_0 , lowerCAmelCase__ = 7_6_0_0 , lowerCAmelCase__ = 1E-10 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> str:
'''simple docstring'''
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Tuple =do_normalize
a__ : Tuple =return_attention_mask
a__ : str =num_mel_bins
a__ : Any =hop_length
a__ : Optional[Any] =win_length
a__ : int =win_function
a__ : List[str] =frame_signal_scale
a__ : List[str] =fmin
a__ : str =fmax
a__ : Dict =mel_floor
a__ : Any =reduction_factor
a__ : str =win_length * sampling_rate // 1_0_0_0
a__ : List[str] =hop_length * sampling_rate // 1_0_0_0
a__ : Optional[Any] =optimal_fft_length(self.sample_size )
a__ : Any =(self.n_fft // 2) + 1
a__ : List[Any] =window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase__ )
a__ : Optional[int] =mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , lowerCAmelCase__ , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , lowerCAmelCase__ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
a__ : List[Any] =np.array(lowerCAmelCase__ , np.intaa )
a__ : Optional[Any] =[]
for vector, length in zip(lowerCAmelCase__ , attention_mask.sum(-1 ) ):
a__ : Tuple =(vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
a__ : Any =padding_value
normed_input_values.append(lowerCAmelCase__ )
else:
a__ : Optional[int] =[(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def _lowercase ( self , lowerCAmelCase__ , ) -> np.ndarray:
'''simple docstring'''
a__ : Dict =spectrogram(
lowerCAmelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
a__ : Dict =self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
a__ : str =None
if audio_target is not None:
a__ : int =self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
if inputs is None:
return inputs_target
else:
a__ : Any =inputs_target["input_values"]
a__ : List[Any] =inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
a__ : Optional[int] =decoder_attention_mask
return inputs
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
a__ : List[Any] =isinstance(lowerCAmelCase__ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a__ : Optional[int] =is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a__ : int =[np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
a__ : List[Any] =np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
a__ : Optional[Any] =speech.astype(np.floataa )
# always return batch
if not is_batched:
a__ : Union[str, Any] =[speech]
# needed to make pad() work on spectrogram inputs
a__ : Union[str, Any] =self.feature_size
# convert into correct format for padding
if is_target:
a__ : Dict =[self._extract_mel_features(lowerCAmelCase__ ) for waveform in speech]
a__ : str =BatchFeature({"input_values": features} )
a__ : List[str] =self.num_mel_bins
else:
a__ : List[str] =BatchFeature({"input_values": speech} )
a__ : Optional[int] =self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
a__ : Any =feature_size_hack
# convert input values to correct format
a__ : List[Any] =padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
a__ : Union[str, Any] =[np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCAmelCase__ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
a__ : str =[array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCAmelCase__ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
a__ : Optional[int] =input_values.astype(np.floataa )
# convert attention_mask to correct format
a__ : str =padded_inputs.get("attention_mask" )
if attention_mask is not None:
a__ : str =[np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
a__ : Union[str, Any] =(
attention_mask
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a__ : List[Any] =self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=lowerCAmelCase__ , padding_value=self.padding_value )
if return_tensors is not None:
a__ : int =padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
def _lowercase ( self ) -> Dict[str, Any]:
'''simple docstring'''
a__ : Optional[int] =super().to_dict()
# Don't serialize these as they are derived from the other properties.
a__ : Optional[Any] =["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
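# Usage sketch (hedged: the class and argument names below follow the upstream
# SpeechT5 feature extractor that this code mirrors; treat them as assumptions):
#
#     import numpy as np
#     extractor = SpeechT5FeatureExtractor()
#     speech = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
#     inputs = extractor(audio=speech, sampling_rate=16000)          # raw waveform features
#     targets = extractor(audio_target=speech, sampling_rate=16000)  # log-mel spectrogram features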
| 563
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase__( snake_case_ ):
UpperCamelCase : torch.FloatTensor
class lowerCamelCase__( snake_case_ , snake_case_ ):
@register_to_config
def __init__( self , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 6_4 , __UpperCAmelCase = 2_0 , __UpperCAmelCase = 7_6_8 , __UpperCAmelCase=7_7 , __UpperCAmelCase=4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = "silu" , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "linear" , __UpperCAmelCase = "prd" , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
"""simple docstring"""
super().__init__()
__lowercase = num_attention_heads
__lowercase = attention_head_dim
__lowercase = num_attention_heads * attention_head_dim
__lowercase = additional_embeddings
__lowercase = time_embed_dim or inner_dim
__lowercase = embedding_proj_dim or embedding_dim
__lowercase = clip_embed_dim or embedding_dim
__lowercase = Timesteps(__UpperCAmelCase , __UpperCAmelCase , 0 )
__lowercase = TimestepEmbedding(__UpperCAmelCase , __UpperCAmelCase , out_dim=__UpperCAmelCase , act_fn=__UpperCAmelCase )
__lowercase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
if embedding_proj_norm_type is None:
__lowercase = None
elif embedding_proj_norm_type == "layer":
__lowercase = nn.LayerNorm(__UpperCAmelCase )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
__lowercase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
if encoder_hid_proj_type is None:
__lowercase = None
elif encoder_hid_proj_type == "linear":
__lowercase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
__lowercase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __UpperCAmelCase ) )
if added_emb_type == "prd":
__lowercase = nn.Parameter(torch.zeros(1 , 1 , __UpperCAmelCase ) )
elif added_emb_type is None:
__lowercase = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
__lowercase = nn.ModuleList(
[
BasicTransformerBlock(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dropout=__UpperCAmelCase , activation_fn="""gelu""" , attention_bias=__UpperCAmelCase , )
for d in range(__UpperCAmelCase )
] )
if norm_in_type == "layer":
__lowercase = nn.LayerNorm(__UpperCAmelCase )
elif norm_in_type is None:
__lowercase = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
__lowercase = nn.LayerNorm(__UpperCAmelCase )
__lowercase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
__lowercase = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , __UpperCAmelCase , persistent=__UpperCAmelCase )
__lowercase = nn.Parameter(torch.zeros(1 , __UpperCAmelCase ) )
__lowercase = nn.Parameter(torch.zeros(1 , __UpperCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = {}
def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , """set_processor""" ):
__lowercase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , __UpperCAmelCase , __UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return processors
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = len(self.attn_processors.keys() )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , """set_processor""" ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
module.set_processor(__UpperCAmelCase )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , __UpperCAmelCase , __UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , ):
"""simple docstring"""
__lowercase = hidden_states.shape[0]
__lowercase = timestep
if not torch.is_tensor(__UpperCAmelCase ):
__lowercase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__UpperCAmelCase ) and len(timesteps.shape ) == 0:
__lowercase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowercase = timesteps * torch.ones(__UpperCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
__lowercase = self.time_proj(__UpperCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowercase = timesteps_projected.to(dtype=self.dtype )
__lowercase = self.time_embedding(__UpperCAmelCase )
if self.embedding_proj_norm is not None:
__lowercase = self.embedding_proj_norm(__UpperCAmelCase )
__lowercase = self.embedding_proj(__UpperCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowercase = self.encoder_hidden_states_proj(__UpperCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__lowercase = self.proj_in(__UpperCAmelCase )
__lowercase = self.positional_embedding.to(hidden_states.dtype )
__lowercase = []
__lowercase = 0
if encoder_hidden_states is not None:
additional_embeds.append(__UpperCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowercase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowercase = hidden_states[:, None, :]
__lowercase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowercase = self.prd_embedding.to(hidden_states.dtype ).expand(__UpperCAmelCase , -1 , -1 )
additional_embeds.append(__UpperCAmelCase )
__lowercase = torch.cat(
__UpperCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
__lowercase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowercase = F.pad(
__UpperCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowercase = hidden_states + positional_embeddings
if attention_mask is not None:
__lowercase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
__lowercase = F.pad(__UpperCAmelCase , (0, self.additional_embeddings) , value=0.0 )
__lowercase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowercase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowercase = self.norm_in(__UpperCAmelCase )
for block in self.transformer_blocks:
__lowercase = block(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__lowercase = self.norm_out(__UpperCAmelCase )
if self.prd_embedding is not None:
__lowercase = hidden_states[:, -1]
else:
__lowercase = hidden_states[:, additional_embeddings_len:]
__lowercase = self.proj_to_clip_embeddings(__UpperCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__UpperCAmelCase )
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
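# Shape sketch (hedged: this class mirrors diffusers' PriorTransformer, as the
# PriorTransformerOutput return type suggests; the constructor arguments and
# call signature below are assumptions based on that upstream API):
#
#     prior = PriorTransformer(num_attention_heads=2, attention_head_dim=8,
#                              num_layers=2, embedding_dim=32, num_embeddings=4)
#     # hidden_states:  [batch, embedding_dim] noisy image embedding
#     # proj_embedding: [batch, embedding_dim] conditioning embedding
#     # output.predicted_image_embedding: [batch, clip_embed_dim]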
| 339
|
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin within this script
    '''simple docstring'''
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin within this script
    '''simple docstring'''
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    '''simple docstring'''
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string"""), """numbers""": datasets.Value("""float32""")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, """dataset.arrow"""), features, num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=True)
        def tokenize(examples):
            return tokenizer(examples["""text"""])
        times["""map identity"""] = map(dataset)
        times["""map identity batched"""] = map(dataset, batched=True)
        times["""map no-op batched"""] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="""numpy"""):
            times["""map no-op batched numpy"""] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="""pandas"""):
            times["""map no-op batched pandas"""] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="""torch""", columns="""numbers"""):
            times["""map no-op batched pytorch"""] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="""tensorflow""", columns="""numbers"""):
            times["""map no-op batched tensorflow"""] = map(dataset, function=lambda x: None, batched=True)
        times["""map fast-tokenizer batched"""] = map(dataset, function=tokenize, batched=True)
        times["""filter"""] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, """wb""") as f:
            f.write(json.dumps(times).encode("""utf-8"""))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 339
| 1
|