"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :Dict , _SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :int ) -> Tuple:
# Initialise PyTorch model
a_ : Tuple = BigBirdConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
a_ : Optional[int] = BigBirdForQuestionAnswering(_SCREAMING_SNAKE_CASE )
else:
a_ : int = BigBirdForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , is_trivia_qa=_SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
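# Usage sketch (editor's addition): the checkpoint/config/output paths below are
# hypothetical placeholders, not files shipped with this script. Calling the
# converter programmatically is equivalent to the CLI entry point above:
#
#   convert_tf_checkpoint_to_pytorch(
#       tf_checkpoint_path="/tmp/bigbird/model.ckpt",     # hypothetical path
#       big_bird_config_file="/tmp/bigbird/config.json",  # hypothetical path
#       pytorch_dump_path="/tmp/bigbird-pytorch",         # hypothetical path
#       is_trivia_qa=False,
#   )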
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = PriorTransformer
lowerCAmelCase__ : int = """hidden_states"""
@property
def A ( self ) -> Tuple:
a_ : Union[str, Any] = 4
a_ : Tuple = 8
a_ : Dict = 7
a_ : Dict = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def A ( self , _SCREAMING_SNAKE_CASE=0 ) -> int:
torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : int = 4
a_ : Dict = 8
a_ : Dict = 7
a_ : Dict = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def A ( self ) -> Optional[Any]:
return (4, 8)
@property
def A ( self ) -> Any:
return (4, 8)
def A ( self ) -> List[str]:
a_ : List[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
a_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def A ( self ) -> Dict:
a_ , a_ : str = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def A ( self ) -> str:
a_ , a_ : str = self.prepare_init_args_and_inputs_for_common()
a_ : List[Any] = self.model_class(**_SCREAMING_SNAKE_CASE )
a_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : List[Any] = [*signature.parameters.keys()]
a_ : Any = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , _SCREAMING_SNAKE_CASE )
def A ( self ) -> Any:
a_ : Tuple = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
a_ : Union[str, Any] = model.to(_SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , "set_default_attn_processor" ):
model.set_default_attn_processor()
a_ : List[Any] = self.get_dummy_seed_input()
with torch.no_grad():
a_ : Dict = model(**_SCREAMING_SNAKE_CASE )[0]
a_ : Any = output[0, :5].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
a_ : Optional[int] = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-2 ) )
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A ( self , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=7_6_8 , _SCREAMING_SNAKE_CASE=7_7 , _SCREAMING_SNAKE_CASE=0 ) -> Optional[int]:
torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : List[str] = batch_size
a_ : Optional[Any] = embedding_dim
a_ : Tuple = num_embeddings
a_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : int = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def A ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[3_7, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
a_ : List[Any] = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(_SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = self.get_dummy_seed_input(seed=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
a_ : int = model(**_SCREAMING_SNAKE_CASE )[0]
assert list(sample.shape ) == [1, 7_6_8]
a_ : str = sample[0, :8].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
a_ : int = torch.tensor(_SCREAMING_SNAKE_CASE )
assert torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
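# Note (editor's sketch, untested): the manual max_id pagination above can also
# be expressed with tweepy's Cursor helper, which walks backwards through the
# timeline internally. Same credentials as above; kept separate so the original
# flow is unchanged:
def get_all_tweets_with_cursor(screen_name: str) -> list:
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    return [
        [tweet.id_str, tweet.created_at, tweet.text]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]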
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
"""simple docstring"""
snake_case_ : Optional[int] = OmegaConf.load(SCREAMING_SNAKE_CASE__ )
if display:
print(yaml.dump(OmegaConf.to_container(SCREAMING_SNAKE_CASE__ ) ) )
return config
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None ):
"""simple docstring"""
if conf_path is None:
snake_case_ : List[str] = """./model_checkpoints/vqgan_only.yaml"""
snake_case_ : Dict = load_config(SCREAMING_SNAKE_CASE__ , display=SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = VQModel(**config.model.params )
if ckpt_path is None:
snake_case_ : Optional[Any] = """./model_checkpoints/vqgan_only.pt"""
snake_case_ : Tuple = torch.load(SCREAMING_SNAKE_CASE__ , map_location=SCREAMING_SNAKE_CASE__ )
if ".ckpt" in ckpt_path:
snake_case_ : Any = sd["""state_dict"""]
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
del sd
return model
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = model.encode(SCREAMING_SNAKE_CASE__ )
print(f'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
snake_case_ : List[Any] = model.decode(SCREAMING_SNAKE_CASE__ )
return xrec
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=False ):
"""simple docstring"""
snake_case_ : Optional[int] = string.rsplit(""".""" , 1 )
if reload:
snake_case_ : Optional[int] = importlib.import_module(SCREAMING_SNAKE_CASE__ )
importlib.reload(SCREAMING_SNAKE_CASE__ )
return getattr(importlib.import_module(SCREAMING_SNAKE_CASE__ , package=SCREAMING_SNAKE_CASE__ ) , cls )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
"""simple docstring"""
snake_case_ : str = instantiate_from_config(SCREAMING_SNAKE_CASE__ )
if sd is not None:
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
if ckpt:
snake_case_ : str = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
snake_case_ : Any = pl_sd["""global_step"""]
print(f'loaded model from global step {global_step}.' )
else:
snake_case_ : List[Any] = {"""state_dict""": None}
snake_case_ : Optional[Any] = None
snake_case_ : Tuple = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=SCREAMING_SNAKE_CASE__ , eval_mode=SCREAMING_SNAKE_CASE__ )["""model"""]
return model, global_step
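# Example (editor's addition): instantiate_from_config resolves a dotted import
# path and calls it with the given params, so any importable callable works.
# The target below is a stdlib class chosen purely for illustration:
#
#   demo_config = {"target": "collections.OrderedDict", "params": {"a": 1}}
#   instantiate_from_config(demo_config)  # -> OrderedDict([('a', 1)])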
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of getting exactly `successes` successes in
    `trials` independent trials, each succeeding with probability `prob`.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
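# Worked check (editor's addition): for the example above, P(X = 2) with
# n = 4 trials and p = 0.75 is
#   C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
# which is the value binomial_distribution(2, 4, 0.75) prints.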
"""simple docstring"""
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
UpperCAmelCase__ = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
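# Worked trace (editor's addition): `number &= number - 1` clears the lowest
# set bit each iteration, so the loop runs once per set bit. For number = 13:
#   0b1101 & 0b1100 -> 0b1100  (count = 1)
#   0b1100 & 0b1011 -> 0b1000  (count = 2)
#   0b1000 & 0b0111 -> 0b0000  (count = 3, loop ends)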
"""simple docstring"""
lowerCAmelCase__ : Tuple = range(2, 20 + 1)
lowerCAmelCase__ : Optional[Any] = [10**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ : dict[int, dict[int, list[list[int]]]] = {}
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
UpperCAmelCase__ = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
UpperCAmelCase__ , UpperCAmelCase__ = 0, 0
UpperCAmelCase__ = n - i
UpperCAmelCase__ = memo.get(lowerCamelCase )
if sub_memo is not None:
UpperCAmelCase__ = sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
UpperCAmelCase__ = -1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCAmelCase__ = _k
break
if max_jump >= 0:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCAmelCase__ = diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
UpperCAmelCase__ , UpperCAmelCase__ = divmod(lowerCamelCase , 1_0 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
UpperCAmelCase__ = []
else:
UpperCAmelCase__ = {c: []}
UpperCAmelCase__ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCAmelCase__ , UpperCAmelCase__ = next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCAmelCase__ , UpperCAmelCase__ = compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
UpperCAmelCase__ = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCAmelCase__ = 0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCAmelCase__ = i
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCAmelCase__ = ds_c + ds_b
diff += addend
UpperCAmelCase__ = 0
for j in range(lowerCamelCase ):
UpperCAmelCase__ = a_i[j] + addend
UpperCAmelCase__ , UpperCAmelCase__ = divmod(lowerCamelCase , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
UpperCAmelCase__ = digits[j] + addend
if s >= 1_0:
UpperCAmelCase__ , UpperCAmelCase__ = divmod(lowerCamelCase , 1_0 )
UpperCAmelCase__ = addend // 1_0 + quotient
else:
UpperCAmelCase__ = s
UpperCAmelCase__ = addend // 1_0
if addend == 0:
break
while addend > 0:
UpperCAmelCase__ , UpperCAmelCase__ = divmod(lowerCamelCase , 1_0 )
digits.append(lowerCamelCase )
def a_ ( lowerCamelCase = 1_0**1_5 ):
UpperCAmelCase__ = [1]
UpperCAmelCase__ = 1
UpperCAmelCase__ = 0
while True:
UpperCAmelCase__ , UpperCAmelCase__ = next_term(lowerCamelCase , 2_0 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
UpperCAmelCase__ = 0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """

    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
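# Illustration (editor's sketch, not part of the original converter): the
# read_in_q_k_v slicing above splits timm's fused qkv projection row-wise into
# query, key and value blocks. A toy-sized sanity check of that slicing:
def _demo_qkv_split(hidden_size: int = 4) -> None:
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value]), fused)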
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


# Print the full diamond, or a message for non-positive input
def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero shot image classification pipeline: predicts the class of an image
    given an image and a set of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
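# Usage sketch (editor's addition): model name, image URL and labels below are
# illustrative, not mandated by this file. Typical use goes through the
# pipeline factory:
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["two cats", "a plane", "a remote"],
#   )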
import os


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
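# Worked example (editor's addition): for the 3-row triangle
#   3
#   7 4
#   2 4 6
# the in-place pass rewrites row 2 to [10, 7] (each entry plus its best parent)
# and row 3 to [12, 14, 13], so the maximum top-to-bottom total is 14
# (the path 3 -> 7 -> 4).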
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ :
def __init__( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict=13 , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[Any]=99 , UpperCamelCase_ : List[Any]=32 , UpperCamelCase_ : Dict=5 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : List[str]=37 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : List[str]=512 , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : List[str]=None , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : str = seq_length
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Dict = use_input_mask
__UpperCAmelCase : Tuple = use_token_type_ids
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : Optional[Any] = type_sequence_label_size
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[int] = num_labels
__UpperCAmelCase : int = num_choices
__UpperCAmelCase : Dict = scope
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : List[str] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self : int):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def a_ ( self : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Any):
"""simple docstring"""
__UpperCAmelCase : Tuple = BioGptModel(config=UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
__UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_)
__UpperCAmelCase : Tuple = model(UpperCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = BioGptForCausalLM(config=UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def a_ ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] , *UpperCamelCase_ : Any):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
# create attention mask
__UpperCAmelCase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase_)
__UpperCAmelCase : List[Any] = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Optional[Any] = ids_tensor((1,) , UpperCamelCase_).item() + 1
__UpperCAmelCase : Dict = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : str = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Optional[int] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCamelCase_)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_)["last_hidden_state"]
__UpperCAmelCase : Any = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ , attention_mask=UpperCamelCase_)["last_hidden_state"]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3))
def a_ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Any , *UpperCamelCase_ : List[Any]):
"""simple docstring"""
__UpperCAmelCase : str = BioGptModel(config=UpperCamelCase_).to(UpperCamelCase_).eval()
__UpperCAmelCase : Any = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase_)
# first forward pass
__UpperCAmelCase : int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_)["last_hidden_state"]
__UpperCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_)[
"last_hidden_state"
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3))
def a_ ( self : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , *UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=False):
"""simple docstring"""
__UpperCAmelCase : Dict = BioGptForCausalLM(UpperCamelCase_)
model.to(UpperCamelCase_)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : List[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
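    # Added aside (illustration, not part of the test suite): the scalar loss asserted
    # above comes from the usual causal-LM shift, roughly
    #   loss = cross_entropy(logits[:, :-1].reshape(-1, vocab), labels[:, 1:].reshape(-1))
    # which is why result.loss.shape == () while result.logits keeps (batch, seq, vocab).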
def a_ ( self : str , UpperCamelCase_ : int , *UpperCamelCase_ : List[str]):
"""simple docstring"""
__UpperCAmelCase : List[str] = BioGptModel(UpperCamelCase_)
__UpperCAmelCase : List[str] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01)
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Any = BioGptForTokenClassification(UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
__UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def a_ ( self : Dict):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowercase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
def a_ ( self : str):
"""simple docstring"""
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def a_ ( self : List[Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Union[str, Any] = type
self.model_tester.create_and_check_model(*UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*UpperCamelCase_ , gradient_checkpointing=UpperCamelCase_)
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCamelCase_)
@slow
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = BioGptTokenizer.from_pretrained("microsoft/biogpt")
__UpperCAmelCase : Tuple = "left"
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase : Tuple = tokenizer.eos_token
__UpperCAmelCase : Union[str, Any] = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : str = [
"Hello, my dog is a little",
"Today, I",
]
__UpperCAmelCase : List[Any] = tokenizer(UpperCamelCase_ , return_tensors="pt" , padding=UpperCamelCase_)
__UpperCAmelCase : List[Any] = inputs["input_ids"].to(UpperCamelCase_)
__UpperCAmelCase : List[Any] = model.generate(
input_ids=UpperCamelCase_ , attention_mask=inputs["attention_mask"].to(UpperCamelCase_) , )
__UpperCAmelCase : Dict = tokenizer(sentences[0] , return_tensors="pt").input_ids.to(UpperCamelCase_)
__UpperCAmelCase : Tuple = model.generate(input_ids=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
__UpperCAmelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors="pt").input_ids.to(UpperCamelCase_)
__UpperCAmelCase : str = model.generate(input_ids=UpperCamelCase_ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Any = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase_)
__UpperCAmelCase : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
self.assertListEqual(UpperCamelCase_ , [non_padded_sentence, padded_sentence])
@slow
def a_ ( self : List[Any]):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : str = BioGptModel.from_pretrained(UpperCamelCase_)
self.assertIsNotNone(UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = 3
__UpperCAmelCase : Union[str, Any] = input_dict["input_ids"]
__UpperCAmelCase : Any = input_ids.ne(1).to(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Dict = BioGptForSequenceClassification(UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
__UpperCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : int = 3
__UpperCAmelCase : List[Any] = "multi_label_classification"
__UpperCAmelCase : str = input_dict["input_ids"]
__UpperCAmelCase : Any = input_ids.ne(1).to(UpperCamelCase_)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(UpperCamelCase_)
model.to(UpperCamelCase_)
model.eval()
__UpperCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class a__ ( unittest.TestCase ):
@slow
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4805, 9, 656, 21]])
__UpperCAmelCase : Any = model(UpperCamelCase_)[0]
__UpperCAmelCase : Any = 42384
__UpperCAmelCase : str = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , UpperCamelCase_)
__UpperCAmelCase : List[Any] = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1e-4))
@slow
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : int = BioGptTokenizer.from_pretrained("microsoft/biogpt")
__UpperCAmelCase : Union[str, Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(UpperCamelCase_)
torch.manual_seed(0)
__UpperCAmelCase : Any = tokenizer("COVID-19 is" , return_tensors="pt").to(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = model.generate(
**UpperCamelCase_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=UpperCamelCase_ , )
__UpperCAmelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
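    # Added recap (illustration only): the batching recipe exercised above is the standard
    # one for decoder-only models -- left padding plus an explicit attention mask, e.g.
    #   tokenizer.padding_side = "left"; tokenizer.pad_token = tokenizer.eos_token
    #   batch = tokenizer(sentences, return_tensors="pt", padding=True)
    #   model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
    # so shorter prompts do not attend to pad tokens as context.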
| 77
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__snake_case = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , _lowerCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , _lowerCAmelCase ):
with patch_submodule(_test_patching , "os.rename" , _lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.dirname" , _lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , _lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.join" , _lowerCAmelCase ):
with patch_submodule(_test_patching , "os.path.dirname" , _lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
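def demo_patch_submodule():
    # Added usage sketch (not one of the original tests): patch_submodule temporarily
    # rebinds a dotted attribute path inside the target module, then restores it.
    mock = "__demo_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        assert _test_patching.os.path.join is mock
    assert _test_patching.os.path.join is not mock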
| 371
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
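# Added note (illustration): _LazyModule defers the heavy imports declared above until
# first attribute access, so e.g. `transformers.models.speech_to_text.Speech2TextConfig`
# only triggers the real import of configuration_speech_to_text at that point.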
| 430
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 430
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 545
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4_096,
"""google/bigbird-roberta-large""": 4_096,
"""google/bigbird-base-trivia-itc""": 4_096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file ,__lowerCamelCase )
return (out_vocab_file,)
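    # Added worked check (illustration only): the segment-id layout produced by
    # `create_token_type_ids_from_sequences` above, mirrored with plain lists --
    # [CLS] a1 a2 [SEP] b1 [SEP] maps to [0, 0, 0, 0, 1, 1], i.e. everything through
    # the first [SEP] belongs to segment 0 and the second sequence plus its [SEP] to segment 1.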
| 387
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    """simple docstring"""
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8_888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)"
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        """simple docstring"""
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        """simple docstring"""
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        """simple docstring"""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """simple docstring"""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """simple docstring"""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    async def forward(self, inputs=Body(None, embed=True)):
        """simple docstring"""
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
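# Added client-side sketch (illustration; assumes a server started via
# `transformers-cli serve --task ... --port 8888`, field names mirror the Body(...)
# parameters of `tokenize` above):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8888/tokenize",
#       json={"text_input": "Hello world", "return_ids": True},
#   )
#   resp.json()  # -> {"tokens": [...], "tokens_ids": [...]}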
| 710
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
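def _demo_naming():
    # Added sanity checks (illustration only, not part of the original module):
    assert camelcase_to_snakecase("SquadV2") == "squad_v2"
    assert snakecase_to_camelcase("squad_v2") == "SquadV2"
    # On POSIX paths, two shards of "squad"/"train" are named e.g.
    # /data/squad-train-00000-of-00002.arrow and /data/squad-train-00001-of-00002.arrow:
    filenames = filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[10, 10])
    assert len(filenames) == 2 and filenames[0].endswith("-00000-of-00002.arrow")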
| 603
| 0
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    """simple docstring"""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """simple docstring"""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
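# Added note (illustration): subtracting the row max before exp() is the standard
# overflow guard; e.g. softmax(np.array([[1000.0, 1000.0]])) returns [[0.5, 0.5]]
# with no overflow warning, while a naive exp() would produce inf/inf.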
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        """simple docstring"""
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs):
        """simple docstring"""
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        """simple docstring"""
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        """simple docstring"""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
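# Added usage sketch (standard transformers entry point, illustration only):
#   from transformers import pipeline
#   clf = pipeline("text-classification")
#   clf("I love this!")              # [{"label": ..., "score": ...}]
#   clf("I love this!", top_k=None)  # all label scores, replacing return_all_scores=True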
| 46
|
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46
| 1
|
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
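def _demo_square_root() -> None:
    # Added sanity checks (illustration) for the Newton iteration above.
    assert abs(square_root_iterative(4.0) - 2.0) < 1e-7
    assert abs(square_root_iterative(3.2) - math.sqrt(3.2)) < 1e-7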
if __name__ == "__main__":
from doctest import testmod
testmod()
| 273
|
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def lowercase_ ( lowercase__ = "p107_network.txt" ) ->int:
_snake_case: str = os.path.abspath(os.path.dirname(lowercase__ ) )
_snake_case: str = os.path.join(lowercase__ , lowercase__ )
_snake_case: dict[EdgeT, int] = {}
_snake_case: list[str]
_snake_case: int
_snake_case: int
with open(lowercase__ ) as f:
_snake_case: Tuple = f.read().strip().split('\n' )
_snake_case: Tuple = [line.split(',' ) for line in data]
for edgea in range(1 , len(lowercase__ ) ):
for edgea in range(lowercase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
_snake_case: int = int(adjaceny_matrix[edgea][edgea] )
_snake_case: Graph = Graph(set(range(len(lowercase__ ) ) ) , lowercase__ )
_snake_case: Graph = graph.prims_algorithm()
_snake_case: int = sum(graph.edges.values() )
_snake_case: int = sum(subgraph.edges.values() )
return initial_total - optimal_total
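def _demo_prims() -> None:
    # Added worked example (illustration): triangle 0-1-2 with weights 1, 2, 3;
    # Prim's algorithm keeps edges (0, 1) and (1, 2), saving the weight-3 edge.
    graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    subgraph = graph.prims_algorithm()
    assert sum(graph.edges.values()) - sum(subgraph.edges.values()) == 3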
if __name__ == "__main__":
print(F'{solution() = }')
| 273
| 1
|
import os
def a_ ( __lowercase : str = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(__lowercase ) , __lowercase ) ) as in_file:
_snake_case = in_file.read()
_snake_case = [[int(__lowercase ) for cell in row.split(',' )] for row in data.strip().splitlines()]
_snake_case = [[0 for cell in row] for row in grid]
_snake_case = len(grid[0] )
_snake_case = [[0 for i in range(__lowercase )] for j in range(__lowercase )]
_snake_case = grid[0][0]
for i in range(1 , __lowercase ):
_snake_case = grid[0][i] + dp[0][i - 1]
for i in range(1 , __lowercase ):
_snake_case = grid[i][0] + dp[i - 1][0]
for i in range(1 , __lowercase ):
for j in range(1 , __lowercase ):
_snake_case = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
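def _demo_min_path_sum() -> None:
    # Added worked example (illustration): the same DP on a 2x2 grid [[1, 3], [2, 1]]
    # gives 1 + 2 + 1 = 4 (down, then right).
    grid = [[1, 3], [2, 1]]
    dp = [[grid[0][0], 0], [0, 0]]
    dp[0][1] = grid[0][1] + dp[0][0]
    dp[1][0] = grid[1][0] + dp[0][0]
    dp[1][1] = grid[1][1] + min(dp[0][1], dp[1][0])
    assert dp[1][1] == 4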
if __name__ == "__main__":
print(F'{solution() = }')
| 686
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution plus max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """Projects residual features to the right size and, if needed, downsamples the input with `stride=2`."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer, composed of two `3x3` convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer: three convolutions, where the first `1x1` convolution reduces the channel
    count by `reduction` to make the `3x3` convolution cheaper, and the last `1x1` convolution expands it back.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # infer the problem type once from the labels if the config does not set it
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        r"""
        Returns:
            `BackboneOutput`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
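
# A minimal usage sketch for the model defined above (assuming the
# `transformers` package is installed; the checkpoint is the one already named
# in `_CHECKPOINT_FOR_DOC`):
#
#     from transformers import AutoImageProcessor, ResNetModel
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetModel.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")  # `image`: a PIL image
#     with torch.no_grad():
#         outputs = model(**inputs)
#     outputs.last_hidden_state.shape  # torch.Size([1, 2048, 7, 7])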
| 686
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        # halfway through, round-trip the scheduler state through disk
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs

@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # w has a gradient toward target; the loss decreases over time
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")

class LambdaScheduleWrapper:
    """Wraps a schedule lambda in a plain callable object so the scheduler becomes picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
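
# A minimal sketch of the pattern these tests exercise (assuming `torch` and
# `transformers` are installed):
#
#     model = nn.Linear(50, 50)
#     optimizer = AdamW(model.parameters(), lr=10.0)
#     scheduler = get_linear_schedule_with_warmup(
#         optimizer, num_warmup_steps=2, num_training_steps=10
#     )
#     unwrap_schedule(scheduler, num_steps=10)
#     # the LR ramps 0.0 -> 10.0 over the 2 warmup steps, then decays linearly to 0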
| 714
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the processor is expected to produce for the given inputs."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
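
# A minimal sketch of what the slow tests above verify (assuming the COCO
# fixture files and the `transformers` package are available):
#
#     processor = ConditionalDetrImageProcessor.from_pretrained(
#         "microsoft/conditional-detr-resnet-50"
#     )
#     encoding = processor(images=image, annotations=target, return_tensors="pt")
#     encoding["pixel_values"].shape  # (1, 3, 800, 1066) for the 480x640 fixture
#     encoding["labels"][0].keys()    # boxes, class_labels, area, iscrowd, ...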
| 173
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
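
# A minimal sketch of how this template is meant to be used (the dataset id is
# hypothetical; `prepare_for_task` is the entry point on `datasets.Dataset`
# releases that shipped task templates):
#
#     from datasets import load_dataset
#
#     task = AudioClassification(audio_column="audio", label_column="labels")
#     ds = load_dataset("some/audio-dataset", split="train")  # hypothetical id
#     ds = ds.prepare_for_task(task)  # casts/renames columns to the task schema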
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
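
# A minimal sketch (assuming `transformers` is installed):
#
#     config = BlenderbotSmallConfig()  # library defaults: 8 encoder/8 decoder layers
#     onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
#     list(onnx_config.inputs)  # input_ids, attention_mask, decoder_input_ids, ...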
| 67
| 0
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d <= `digit` for which numerator/d contains the longest
    recurring cycle in its decimal fraction part (Project Euler problem 26).
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            # a repeated remainder means the decimal expansion cycles from here
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
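
# Worked example: among 1/2 .. 1/10, the fraction 1/7 = 0.(142857) has the
# longest recurring cycle (length 6), so:
#
#     assert solution(1, 10) == 7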
| 707
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }

@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
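
# A minimal sketch of the processor under test (assuming `transformers` and
# `PIL` are installed; the size dict mirrors the tester defaults above):
#
#     from transformers import DPTImageProcessor
#
#     processor = DPTImageProcessor(size={"height": 18, "width": 18})
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     # pixel_values.shape == (1, 3, 18, 18) for a single RGB `image`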
| 54
| 0
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """
    Find the contiguous subarray of arr[low..high] with the largest sum, using
    divide and conquer. Returns (start index, end index, sum).

    >>> nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    >>> max_subarray(nums, 0, len(nums) - 1)
    (3, 6, 6)
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Find the best subarray that crosses the midpoint."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    """Time max_subarray on a random array of `input_size` elements."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
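
# Worked example on the classic CLRS input: the maximum subarray of
#     [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
# is arr[7..10] = [18, 20, -7, 12] with sum 43, so max_subarray(arr, 0, 15)
# returns (7, 10, 43).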
| 504
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
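
# A minimal sketch of the JIT pattern exercised by `test_jit_compilation`
# (assuming `jax` and the Flax extras of `transformers` are installed; the tiny
# config values mirror the tester defaults above):
#
#     config = ViTConfig(image_size=30, patch_size=2, hidden_size=32,
#                        num_hidden_layers=5, num_attention_heads=4,
#                        intermediate_size=37)
#     model = FlaxViTModel(config)
#
#     @jax.jit
#     def forward(pixel_values):
#         return model(pixel_values=pixel_values).to_tuple()
#
#     forward(np.ones((1, 3, 30, 30)))  # compiled on first call, cached after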
| 196
| 0
|
def is_palindrome(head) -> bool:
    """Check whether a linked list is a palindrome by reversing its second half."""
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """Check whether a linked list is a palindrome by stacking the second half."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    """Check whether a linked list is a palindrome by hashing value positions."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
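
# NOTE: the checks above assume a singly linked node type exposing `val` and
# `next`, which this file does not define. The `ListNode` class and the
# `build_list` helper below are an illustrative sketch, not part of the
# original file.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    """Build a singly linked list from an iterable and return its head."""
    head = tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


# Each call gets a fresh list because is_palindrome() splits its input in place.
assert is_palindrome(build_list([1, 2, 2, 1]))
assert is_palindrome_stack(build_list([1, 2, 2, 1]))
assert not is_palindrome_dict(build_list([1, 2, 3]))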
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image) -> torch.Tensor:
    """Resize a PIL image to a multiple of 32 and scale it to [-1, 1]."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
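
# Usage sketch (added for illustration, not part of the original file). The
# checkpoint id "CompVis/ldm-super-resolution-4x-openimages" is the one
# commonly shown in the diffusers docs for this pipeline; verify it before
# relying on it.
#
# import PIL.Image
#
# pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
# upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]
# upscaled.save("upscaled.png")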
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) indexes of a letter in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the given 1-based (row, column) indexes."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
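
# Usage sketch (added for illustration, not part of the original file):
# encoding transposes the row/column coordinates, decoding reverses it, so a
# message without spaces or "j" round-trips exactly.
#
# cipher = BifidCipher()
# secret = cipher.encode("testmessage")   # -> "qtltbdxrxlk"
# assert cipher.decode(secret) == "testmessage"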
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = GPTSanJapaneseTokenizer
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Optional[int] = {'''do_clean_text''': False, '''add_prefix_space''': False}
def snake_case ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
snake_case = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
snake_case = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
snake_case = {"""unk_token""": """<unk>"""}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(__a ) )
def snake_case ( self , **lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
snake_case = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.get_input_output_texts(__a )
snake_case = tokenizer.encode(__a , add_special_tokens=__a )
snake_case = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
return text, ids
def snake_case ( self ):
"""simple docstring"""
pass # TODO add if relevant
def snake_case ( self ):
"""simple docstring"""
pass # TODO add if relevant
def snake_case ( self ):
"""simple docstring"""
pass # TODO add if relevant
def snake_case ( self ):
"""simple docstring"""
snake_case = self.get_tokenizer()
# Testing tokenization
snake_case = """こんにちは、世界。 こんばんは、㔺界。"""
snake_case = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
snake_case = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
snake_case = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
snake_case = tokens + [tokenizer.unk_token]
snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
snake_case = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , __a )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.get_tokenizer()
# Testing tokenization
snake_case = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
snake_case = """こんにちは、、、、世界。こんばんは、、、、世界。"""
snake_case = tokenizer.encode(__a )
snake_case = tokenizer.decode(__a )
self.assertEqual(__a , __a )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
snake_case = """こんにちは、世界。"""
snake_case = """こんばんは、㔺界。😀"""
snake_case = """こんにちは、世界。こんばんは、世界。😀"""
snake_case = tokenizer.encode(prefix_text + input_text )
snake_case = tokenizer.encode('' , prefix_text=prefix_text + input_text )
snake_case = tokenizer.encode(__a , prefix_text=__a )
snake_case = tokenizer.decode(__a )
snake_case = tokenizer.decode(__a )
snake_case = tokenizer.decode(__a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
snake_case = """こんにちは、世界。"""
snake_case = """こんばんは、㔺界。😀"""
snake_case = len(tokenizer.encode(__a ) ) - 2
snake_case = len(tokenizer.encode(__a ) ) - 2
snake_case = [1] + [0] * (len_prefix + len_text + 1)
snake_case = [1] * (len_prefix + len_text + 1) + [0]
snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
snake_case = tokenizer(prefix_text + input_text ).token_type_ids
snake_case = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
snake_case = tokenizer(__a , prefix_text=__a ).token_type_ids
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
snake_case = tokenizer.encode('あンいワ' )
snake_case = tokenizer.encode('' , prefix_text='あンいワ' )
snake_case = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
self.assertNotEqual(__a , __a )
self.assertNotEqual(__a , __a )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
snake_case = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
snake_case = tokenizer(__a , padding=__a )
snake_case = tokenizer.batch_encode_plus(__a , padding=__a )
# fmt: off
snake_case = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __a )
self.assertListEqual(x_token.token_type_ids , __a )
self.assertListEqual(x_token.attention_mask , __a )
self.assertListEqual(x_token_a.input_ids , __a )
self.assertListEqual(x_token_a.token_type_ids , __a )
self.assertListEqual(x_token_a.attention_mask , __a )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
"""simple docstring"""
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
pass
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
pass
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
snake_case = [
[],
[],
[],
]
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 1_00:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(lowerCAmelCase )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def snake_case ( self ):
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
"""simple docstring"""
return "\n".join(F"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
snake_case = []
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
if len(self.queue ) == 1_00:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
snake_case = min(self.queue )
self.queue.remove(lowerCAmelCase )
return data
def __str__( self ):
"""simple docstring"""
return str(self.queue )
def lowerCAmelCase__ ( ) -> int:
"""simple docstring"""
snake_case = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
print(_UpperCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_UpperCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def lowerCAmelCase__ ( ) -> List[str]:
"""simple docstring"""
snake_case = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
print(_UpperCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_UpperCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
"""simple docstring"""
import math
def a ( __UpperCAmelCase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( __UpperCAmelCase : float = 0.1 ) -> int:
__magic_name__: Union[str, Any] = 3
__magic_name__: Union[str, Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__UpperCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
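
# Worked example (added for illustration): for j = 3 the inner loop visits the
# three non-square corners of the 5x5 ring, range(13, 25, 4) -> 13, 17, 21,
# of which 13 and 17 are prime; the fourth corner, 25 = 5 * 5, is a perfect
# square and never prime, so it is skipped.
# assert list(range(3 * 3 + 3 + 1, 5 * 5, 3 + 1)) == [13, 17, 21]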
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS (edge weights restricted to 0 or 1)."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
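
# Usage sketch (added for illustration, not part of the original file):
# g = AdjacencyList(3)
# g.add_edge(0, 1, 0)   # free edge
# g.add_edge(1, 2, 1)   # unit-cost edge
# assert g.get_shortest_path(0, 2) == 1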
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Recursively build the state-space tree: at each index, first skip and then
    take the current element, printing every complete subsequence."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
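
# For example (added note), generate_all_subsequences(["A", "B"]) prints the
# four subsequences in this order: [], ['B'], ['A'], ['A', 'B'] -- the
# "skip" branch is always explored before the "take" branch.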
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Dict = 13
UpperCAmelCase_ : Any = 7
UpperCAmelCase_ : str = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : List[Any] = 99
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[int] = 32
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : List[Any] = 0.1
UpperCAmelCase_ : Union[str, Any] = 0.1
UpperCAmelCase_ : Union[str, Any] = 512
UpperCAmelCase_ : Any = 16
UpperCAmelCase_ : Dict = 2
UpperCAmelCase_ : Union[str, Any] = 0.02
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : Any = '''last'''
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Optional[Any] = 0
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCAmelCase_ : int = None
if self.use_input_lengths:
UpperCAmelCase_ : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Union[str, Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : str , __snake_case : List[Any] , __snake_case : Tuple , ):
'''simple docstring'''
UpperCAmelCase_ : Any = TFFlaubertModel(config=__snake_case )
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : str = [input_ids, input_mask]
UpperCAmelCase_ : List[str] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , __snake_case : Optional[int] , __snake_case : int , __snake_case : Any , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = TFFlaubertWithLMHeadModel(__snake_case )
UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : int = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : str = TFFlaubertForQuestionAnsweringSimple(__snake_case )
UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : int = model(__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(__snake_case )
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Tuple , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Dict , __snake_case : List[Any] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_labels
UpperCAmelCase_ : Tuple = TFFlaubertForTokenClassification(config=__snake_case )
UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : List[str] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.num_choices
UpperCAmelCase_ : List[str] = TFFlaubertForMultipleChoice(config=__snake_case )
UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : Optional[int] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : List[Any] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : Dict = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
UpperCAmelCase_
) : Optional[int] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_output_embeds_base_model(self):
        '''simple docstring'''
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
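    # A minimal standalone sketch of the same forward pass (checkpoint name, input ids,
    # and output shape are all taken from the test above; nothing new is assumed):
    #
    #     model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
    #     input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]])  # "J'aime flaubert !"
    #     last_hidden_state = model(input_ids)[0]  # tf.TensorShape((1, 8, 512))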
| 715
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    '''simple docstring'''
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    '''simple docstring'''
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    '''simple docstring'''
    # I and J are used interchangeably so the 25 remaining letters fit a 5x5 grid
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
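
# A quick round-trip sketch of the functions above. Note that decoding recovers the
# *prepared* input, with X padding between doubled letters and at odd length
# (here "HELXLOWORLDX"), not the raw message:
if __name__ == "__main__":
    key = "MONARCHY"
    encoded = encode("HELLO WORLD", key)
    decoded = decode(encoded, key)
    print(f"{encoded = }")
    print(f"{decoded = }")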
| 12
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
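
    # For reference, a condensed usage sketch of the pipeline exercised above
    # (all names and values are drawn from the tests in this file):
    #
    #     scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
    #     pipe = CycleDiffusionPipeline.from_pretrained(
    #         "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
    #     )
    #     out = pipe(
    #         prompt="A blue colored car",
    #         source_prompt="A black colored car",
    #         image=init_image,  # a 512x512 PIL image supplied by the caller
    #         num_inference_steps=100,
    #         eta=0.1,
    #         strength=0.85,
    #         guidance_scale=3,
    #         source_guidance_scale=1,
    #         output_type="np",
    #     )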
| 49
| 0
|
lowerCAmelCase_ : List[Any] = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase_ : Optional[int] = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase_ : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 701
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCAmelCase_ : Dict = parser.parse_args()
if args.model_type == "roberta":
lowerCAmelCase_ : str = RobertaForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase_ : Tuple = 'roberta'
elif args.model_type == "gpt2":
lowerCAmelCase_ : List[Any] = GPTaLMHeadModel.from_pretrained(args.model_name)
lowerCAmelCase_ : Optional[Any] = 'transformer'
lowerCAmelCase_ : List[Any] = model.state_dict()
lowerCAmelCase_ : List[Any] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCAmelCase_ : List[str] = state_dict[f'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCAmelCase_ : Any = f'{prefix}.embeddings.{w}.weight'
lowerCAmelCase_ : Any = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCAmelCase_ : str = f'{prefix}.embeddings.LayerNorm.{w}'
lowerCAmelCase_ : str = state_dict[param_name]
# Transformer Blocks #
lowerCAmelCase_ : List[str] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCAmelCase_ : List[str] = state_dict[
f'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
lowerCAmelCase_ : Any = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCAmelCase_ : Any = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCAmelCase_ : List[Any] = state_dict[f'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase_ : str = state_dict[f'lm_head.dense.{w}']
lowerCAmelCase_ : Optional[int] = state_dict[f'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCAmelCase_ : Dict = state_dict[f'{prefix}.ln_f.{w}']
lowerCAmelCase_ : Any = state_dict['lm_head.weight']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
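
# Hypothetical invocation of this extraction script (the script file name and
# checkpoint path are illustrative, not taken from the repository):
#
#     python extract_distilled_checkpoint.py --model_type roberta --model_name roberta-large \
#         --dump_checkpoint serialization_dir/roberta_6layers.pth --vocab_transform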
| 464
| 0
|
'''simple docstring'''
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        '''simple docstring'''
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        '''simple docstring'''
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
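
# A bounded usage sketch (the demo above loops forever). The multiplier, increment,
# and modulo match the demo; the fixed seed is an illustrative choice:
#
#     lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#     samples = [lcg.next_number() for _ in range(3)]  # deterministic for a fixed seed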
| 288
|
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
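
# Worked example for the functions above: with power=2, needed_sum=13 has exactly one
# representation as a sum of distinct squares (2**2 + 3**2 = 4 + 9 = 13), so:
#
#     assert solve(13, 2) == 1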
| 288
| 1
|
def count_inversions_bf(arr) -> int:
    """simple docstring"""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """simple docstring"""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main() -> None:
    """simple docstring"""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
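
# A standalone usage sketch of the two counters above (counts follow directly from the
# definition: pairs (i, j) with i < j and arr[i] > arr[j]):
#
#     assert count_inversions_bf([3, 1, 2]) == 2              # (3, 1) and (3, 2)
#     assert count_inversions_recursive([3, 1, 2])[1] == 2    # same count, O(n log n)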
| 503
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
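
# A minimal call sketch for make_atom14_masks; the only required input is an "aatype"
# tensor of residue-type indices (the three-residue sequence is an arbitrary example):
#
#     protein = {"aatype": torch.tensor([0, 1, 2])}
#     protein = make_atom14_masks(protein)
#     protein["residx_atom14_to_atom37"].shape  # -> torch.Size([3, 14])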
| 503
| 1
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_rescale" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_pad" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
_A = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
_A = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
_A = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_A = json.loads(f.read() )
_A = {'image_id': 39769, 'annotations': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors="pt" )
# verify pixel values
_A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCAmelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCAmelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCAmelCase ) )
# verify orig_size
_A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCAmelCase ) )
# verify size
_A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCAmelCase ) )
@slow
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_A = json.loads(f.read() )
_A = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_A = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_A = DeformableDetrImageProcessor(format="coco_panoptic" )
_A = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors="pt" )
# verify pixel values
_A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCAmelCase )
_A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCAmelCase ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCAmelCase ) )
# verify masks
_A = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCAmelCase )
# verify orig_size
_A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCAmelCase ) )
# verify size
_A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCAmelCase ) )
| 330
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__A : Dict = sample_size
# time
if time_embedding_type == "fourier":
__A : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase)
__A : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__A : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase)
__A : List[str] = block_out_channels[0]
if use_timestep_embedding:
__A : Optional[Any] = block_out_channels[0] * 4
__A : Optional[int] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
__A : Dict = nn.ModuleList([])
__A : Dict = None
__A : Tuple = nn.ModuleList([])
__A : Tuple = None
# down
__A : Any = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase):
__A : Tuple = output_channel
__A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__A : List[str] = i == len(_UpperCAmelCase) - 1
__A : int = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase)
# mid
__A : str = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
__A : Optional[int] = list(reversed(_UpperCAmelCase))
__A : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
__A : str = out_channels
else:
__A : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase):
__A : Optional[Any] = output_channel
__A : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels
)
__A : Dict = i == len(_UpperCAmelCase) - 1
__A : str = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase)
__A : Optional[int] = output_channel
# out
__A : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
__A : Optional[Any] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        '''simple docstring'''
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
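
    # A minimal forward-pass sketch for this model; the channel/length choices below
    # are illustrative assumptions, not a shipped configuration:
    #
    #     model = UNet1DModel(sample_size=128, in_channels=2, out_channels=2)
    #     sample = torch.randn(1, 2, 128)   # (batch, channels, length)
    #     output = model(sample, timestep=10)
    #     output.sample.shape               # matches the input shape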
| 8
| 0
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
_snake_case = None
_snake_case = {
"7B": 1_1008,
"13B": 1_3824,
"30B": 1_7920,
"65B": 2_2016,
"70B": 2_8672,
}
_snake_case = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    '''simple docstring'''
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    '''simple docstring'''
    with open(path, 'r') as f:
        return json.load(f)


def write_json(text, path):
    '''simple docstring'''
    with open(path, 'w') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
'''simple docstring'''
os.makedirs(__snake_case , exist_ok=__snake_case )
lowerCamelCase__ = os.path.join(__snake_case , 'tmp' )
os.makedirs(__snake_case , exist_ok=__snake_case )
lowerCamelCase__ = read_json(os.path.join(__snake_case , 'params.json' ) )
lowerCamelCase__ = NUM_SHARDS[model_size]
lowerCamelCase__ = params['n_layers']
lowerCamelCase__ = params['n_heads']
lowerCamelCase__ = n_heads // num_shards
lowerCamelCase__ = params['dim']
lowerCamelCase__ = dim // n_heads
lowerCamelCase__ = 10000.0
lowerCamelCase__ = 1.0 / (base ** (torch.arange(0 , __snake_case , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowerCamelCase__ = params['n_kv_heads'] # for GQA / MQA
lowerCamelCase__ = n_heads_per_shard // num_key_value_heads
lowerCamelCase__ = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowerCamelCase__ = n_heads
lowerCamelCase__ = n_heads_per_shard
lowerCamelCase__ = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowerCamelCase__ = torch.load(os.path.join(__snake_case , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
lowerCamelCase__ = [
torch.load(os.path.join(__snake_case , F'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(__snake_case )
]
lowerCamelCase__ = 0
lowerCamelCase__ = {'weight_map': {}}
for layer_i in range(__snake_case ):
lowerCamelCase__ = F'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
lowerCamelCase__ = {
F'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wq.weight'] ),
F'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wk.weight'] ),
F'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[F'layers.{layer_i}.attention.wv.weight'],
F'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[F'layers.{layer_i}.attention.wo.weight'],
F'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w1.weight'],
F'model.layers.{layer_i}.mlp.down_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w2.weight'],
F'model.layers.{layer_i}.mlp.up_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w3.weight'],
F'model.layers.{layer_i}.input_layernorm.weight': loaded[F'layers.{layer_i}.attention_norm.weight'],
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[F'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowerCamelCase__ = {
F'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
F'layers.{layer_i}.attention_norm.weight'
].clone(),
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
F'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
lowerCamelCase__ = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wq.weight'].view(__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case ) )
lowerCamelCase__ = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wk.weight'].view(
__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case , )
lowerCamelCase__ = torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wv.weight'].view(
__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case )
lowerCamelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.attention.wo.weight'] for i in range(__snake_case )] , dim=1 )
lowerCamelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__snake_case )] , dim=0 )
lowerCamelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__snake_case )] , dim=1 )
lowerCamelCase__ = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__snake_case )] , dim=0 )
lowerCamelCase__ = inv_freq
for k, v in state_dict.items():
lowerCamelCase__ = filename
param_count += v.numel()
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
lowerCamelCase__ = F'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
lowerCamelCase__ = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
lowerCamelCase__ = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(__snake_case )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(__snake_case )] , dim=0 ),
}
for k, v in state_dict.items():
lowerCamelCase__ = filename
param_count += v.numel()
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
# Write configs
lowerCamelCase__ = {'total_size': param_count * 2}
write_json(__snake_case , os.path.join(__snake_case , 'pytorch_model.bin.index.json' ) )
lowerCamelCase__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
lowerCamelCase__ = params['multiple_of'] if 'multiple_of' in params else 256
lowerCamelCase__ = LlamaConfig(
hidden_size=__snake_case , intermediate_size=compute_intermediate_size(__snake_case , __snake_case , __snake_case ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=__snake_case , )
config.save_pretrained(__snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(__snake_case , safe_serialization=__snake_case )
shutil.rmtree(__snake_case )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    '''simple docstring'''
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.')
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders', )
    parser.add_argument(
        '--model_size', choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'], )
    parser.add_argument(
        '--output_dir', help='Location to write HF model and tokenizer', )
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
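
# Hypothetical invocation (paths are illustrative):
#
#     python convert_llama_weights_to_hf.py \
#         --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path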
| 707
|
"""simple docstring"""
def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    '''simple docstring'''
    # This array is filled by BFS with the augmenting path found on each iteration
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
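
# For the capacity matrix above (the classic CLRS example network), this BFS-based
# augmenting-path search (Edmonds-Karp) should report a maximum flow of 23 from
# node 0 to node 5.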
| 659
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCamelCase : Optional[int] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCamelCase : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCAmelCase : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCamelCase : List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCamelCase : List[Any] = 16
elif accelerator.mixed_precision != "no":
_lowerCamelCase : str = 8
else:
_lowerCamelCase : Optional[Any] = None
return tokenizer.pad(
_lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
_lowerCamelCase : List[Any] = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
_lowerCamelCase : List[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
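# Note, for reference: the manual truncation in the eval loop above can be
# replaced by `Accelerator.gather_for_metrics`, which drops the duplicated
# samples that distributed samplers add to the last batch automatically.
# A minimal sketch, assuming the same `model`, `eval_dataloader`, `metric`
# and `accelerator` objects as in `training_function`:
#
#     model.eval()
#     for batch in eval_dataloader:
#         with torch.no_grad():
#             outputs = model(**batch)
#         predictions = outputs.logits.argmax(dim=-1)
#         predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#         metric.add_batch(predictions=predictions, references=references)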
| 44
|
import collections
import importlib.util
import os
import re
from pathlib import Path
lowercase_ = """src/transformers"""
# Matches is_xxx_available()
lowercase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowercase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowercase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowercase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase_ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase_ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowercase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowercase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowercase_ = re.compile(R"""^\s*else:""")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
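# For intuition, `find_backend` turns a backend guard line into a sorted,
# "_and_"-joined key (illustrative inputs, matching the regexes defined above):
#
#     find_backend("import os")                     # None
#     find_backend("if not is_torch_available():")  # "torch"
#     find_backend("if not is_torch_available() and not is_vision_available():")
#                                                   # "torch_and_vision"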
| 235
| 0
|
'''simple docstring'''
def is_subset_sum(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
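# Usage sketch for the subset-sum DP above: 9 = 4 + 5 is reachable, while 30
# exceeds the total of every subset of this array.
assert is_subset_sum([3, 34, 4, 12, 5, 2], 9) is True
assert is_subset_sum([3, 34, 4, 12, 5, 2], 30) is False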
| 706
|
'''simple docstring'''
def dodecahedron_surface_area(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
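# Spot checks for a unit edge, following the closed forms above:
# surface area = 3 * sqrt(25 + 10 * sqrt(5)) ~ 20.6457
# volume = (15 + 7 * sqrt(5)) / 4 ~ 7.6631
assert round(dodecahedron_surface_area(1), 4) == 20.6457
assert round(dodecahedron_volume(1), 4) == 7.6631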
| 223
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
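# The `_LazyModule` registration above defers the heavy torch import:
# `_import_structure` only lists the public names, and e.g.
# `from transformers import TableTransformerModel` triggers the actual import
# of `modeling_table_transformer` on first access, while the TYPE_CHECKING
# branch keeps static analyzers and IDEs working with eager imports.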
| 23
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111
| 0
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so the module still imports when vision extras are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
"""simple docstring"""
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 700
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1]
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2]
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1]
    )

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1]
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
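# The pattern these tests exercise, in isolation (a sketch; assumes a prepared
# `accelerator`, `ddp_model` and `dataloader` whose shards are uneven across
# processes):
#
#     with accelerator.join_uneven_inputs([ddp_model]):
#         for batch in dataloader:
#             loss = ddp_model(batch[0].float()).sum()
#             loss.backward()
#
# Without the join context, the process that runs out of batches first would
# hang in DDP's gradient synchronization.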
| 77
| 0
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCamelCase__ = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
def __eq__( self : List[Any] , _A : str ):
'''simple docstring'''
if not isinstance(A__ , A__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)
    def iter_texts(self):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
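# Typical usage of the pipeline defined above (the checkpoint name is
# illustrative; any conversational model works):
#
#     from transformers import Conversation, pipeline
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#     conversation = Conversation("What's a good first programming language?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])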
| 75
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
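# Cross-check: the Project Euler 114 statement gives exactly seventeen ways
# for a row of length seven, which the function reproduces.
assert solution(7) == 17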
| 342
| 0
|
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total
if __name__ == "__main__":
print(F'{solution() = }')
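# Quick check on a small bound: the even Fibonacci terms not exceeding 100
# are 2, 8 and 34, summing to 44. (The default bound of 4,000,000 yields the
# well-known Project Euler 2 answer, 4613732.)
assert solution(100) == 44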
| 9
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
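# Instantiating the config above with its defaults (a sketch):
#
#     config = DonutSwinConfig()
#     config.model_type   # "donut-swin"
#     config.hidden_size  # 768 == 96 * 2 ** 3 for the default depths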
| 9
| 1
|
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
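# The printed figures can be verified directly:
assert combinations(52, 5) == 2598960
assert combinations(40, 4) == 91390
assert combinations(10, 3) == 120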
| 312
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def _a ( lowerCAmelCase , lowerCAmelCase="wiki40b" , lowerCAmelCase="dense" , lowerCAmelCase=10 )-> Union[str, Any]:
if source == "none":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = query_qa_dense_index(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = query_es_index(
lowerCAmelCase , lowerCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
SCREAMING_SNAKE_CASE_ = 'question: {} context: {}'.format(lowerCAmelCase , lowerCAmelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'''Maximum generation length''', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE: Union[str, Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE: Union[str, Any] = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE: Tuple = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE: Optional[Any] = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE: Dict = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE: int = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE: Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE: Dict = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE: List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE: Optional[int] = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE: str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 360
| 0
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def _lowercase ( __lowerCAmelCase=OBJECTS , __lowerCAmelCase=ATTRIBUTES ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
with open(__lowerCAmelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
SCREAMING_SNAKE_CASE__ : Optional[int] = []
with open(__lowerCAmelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def _lowercase ( __lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__ : str = OrderedDict()
with open(__lowerCAmelCase , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ : int = pkl.load(__lowerCAmelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = ckp.pop(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , np.ndarray ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(__lowerCAmelCase )
else:
            assert isinstance(__lowerCAmelCase , torch.Tensor ), type(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = v
return r
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = {}
def __init__( self , _a , _a = "root" , _a=0 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = name
SCREAMING_SNAKE_CASE__ : Optional[Any] = level
SCREAMING_SNAKE_CASE__ : Tuple = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
SCREAMING_SNAKE_CASE__ : Optional[Any] = copy.deepcopy(_a )
SCREAMING_SNAKE_CASE__ : List[str] = copy.deepcopy(_a )
if isinstance(_a , _a ):
SCREAMING_SNAKE_CASE__ : Tuple = Config(_a , name=_a , level=level + 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = v
setattr(self , _a , _a )
SCREAMING_SNAKE_CASE__ : Tuple = d
def __repr__( self ) -> Optional[int]:
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
def __setattr__( self , _a , _a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = val
SCREAMING_SNAKE_CASE__ : str = val
SCREAMING_SNAKE_CASE__ : str = key.split(""".""" )
SCREAMING_SNAKE_CASE__ : str = len(_a ) - 1
SCREAMING_SNAKE_CASE__ : List[str] = self._pointer
if len(_a ) > 1:
for i, l in enumerate(_a ):
if hasattr(self , _a ) and isinstance(getattr(self , _a ) , _a ):
setattr(getattr(self , _a ) , """.""".join(levels[i:] ) , _a )
if l == last_level:
SCREAMING_SNAKE_CASE__ : Any = val
else:
SCREAMING_SNAKE_CASE__ : str = pointer[l]
def _a ( self ) -> List[str]:
"""simple docstring"""
return self._pointer
def _a ( self , _a , _a ) -> Dict:
"""simple docstring"""
with open(f'''{file_name}''' , """w""" ) as stream:
dump(_a , _a )
def _a ( self , _a , _a ) -> int:
"""simple docstring"""
with open(f'''{file_name}''' , """w""" ) as stream:
json.dump(_a , _a )
@staticmethod
def _a ( _a ) -> Optional[Any]:
"""simple docstring"""
with open(_a ) as stream:
SCREAMING_SNAKE_CASE__ : int = load(_a , Loader=_a )
return data
def __str__( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """ """
if self._name != "root":
SCREAMING_SNAKE_CASE__ : List[Any] = f'''{t * (self._level-1)}{self._name}:\n'''
else:
SCREAMING_SNAKE_CASE__ : Tuple = """"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_a , _a ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(_a ).__name__})\n'''
SCREAMING_SNAKE_CASE__ : str = level
return r[:-1]
@classmethod
def _a ( cls , _a , **_a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = cls.get_config_dict(_a , **_a )
return cls(_a )
@classmethod
def _a ( cls , _a , **_a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("""cache_dir""" , _a )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("""force_download""" , _a )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("""resume_download""" , _a )
SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop("""proxies""" , _a )
SCREAMING_SNAKE_CASE__ : str = kwargs.pop("""local_files_only""" , _a )
if os.path.isdir(_a ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(_a , _a )
elif os.path.isfile(_a ) or is_remote_url(_a ):
SCREAMING_SNAKE_CASE__ : List[str] = pretrained_model_name_or_path
else:
SCREAMING_SNAKE_CASE__ : str = hf_bucket_url(_a , filename=_a , use_cdn=_a )
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE__ : Any = cached_path(
_a , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , local_files_only=_a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
SCREAMING_SNAKE_CASE__ : List[str] = Config.load_yaml(_a )
except EnvironmentError:
SCREAMING_SNAKE_CASE__ : Tuple = """Can't load config for"""
raise EnvironmentError(_a )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(_a ), kwargs
def _lowercase ( in_tensor ) -> Union[str, Any]:
    out_tensor = torch.load("""dump.pt""" , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.01 , atol=0.1 ), (
        F'''{sum([1 for x in np.isclose(na , nb , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def _lowercase ( url_or_filename ) -> Dict:
    parsed = urlparse(url_or_filename )
return parsed.scheme in ("http", "https")
def _lowercase ( model_id , filename , use_cdn=True ) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = """/""" not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=0 , __lowerCAmelCase=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[str] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
ua += "; " + "; ".join("""{}/{}""".format(__lowerCAmelCase , __lowerCAmelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
ua += "; " + user_agent
SCREAMING_SNAKE_CASE__ : List[str] = {"""user-agent""": ua}
if resume_size > 0:
SCREAMING_SNAKE_CASE__ : Dict = """bytes=%d-""" % (resume_size,)
SCREAMING_SNAKE_CASE__ : Optional[int] = requests.get(__lowerCAmelCase , stream=__lowerCAmelCase , proxies=__lowerCAmelCase , headers=__lowerCAmelCase )
if response.status_code == 416: # Range not satisfiable
return
SCREAMING_SNAKE_CASE__ : Optional[int] = response.headers.get("""Content-Length""" )
SCREAMING_SNAKE_CASE__ : Any = resume_size + int(__lowerCAmelCase ) if content_length is not None else None
SCREAMING_SNAKE_CASE__ : int = tqdm(
unit="""B""" , unit_scale=__lowerCAmelCase , total=__lowerCAmelCase , initial=__lowerCAmelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCAmelCase ) )
temp_file.write(__lowerCAmelCase )
progress.close()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=10 , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , ) -> Union[str, Any]:
if cache_dir is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = str(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = None
if not local_files_only:
try:
SCREAMING_SNAKE_CASE__ : Dict = requests.head(__lowerCAmelCase , allow_redirects=__lowerCAmelCase , proxies=__lowerCAmelCase , timeout=__lowerCAmelCase )
if response.status_code == 200:
SCREAMING_SNAKE_CASE__ : List[str] = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
SCREAMING_SNAKE_CASE__ : Tuple = url_to_filename(__lowerCAmelCase , __lowerCAmelCase )
# get cache path to put the file
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCAmelCase ):
return cache_path
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCAmelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCAmelCase ) > 0:
return os.path.join(__lowerCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
SCREAMING_SNAKE_CASE__ : List[Any] = cache_path + """.lock"""
with FileLock(__lowerCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
SCREAMING_SNAKE_CASE__ : Dict = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCAmelCase , """a+b""" ) as f:
yield f
SCREAMING_SNAKE_CASE__ : Tuple = _resumable_file_manager
if os.path.exists(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Dict = os.stat(__lowerCAmelCase ).st_size
else:
SCREAMING_SNAKE_CASE__ : str = 0
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = partial(tempfile.NamedTemporaryFile , dir=__lowerCAmelCase , delete=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCAmelCase , temp_file.name , )
http_get(
__lowerCAmelCase , __lowerCAmelCase , proxies=__lowerCAmelCase , resume_size=__lowerCAmelCase , user_agent=__lowerCAmelCase , )
os.replace(temp_file.name , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = {"""url""": url, """etag""": etag}
SCREAMING_SNAKE_CASE__ : Optional[Any] = cache_path + """.json"""
with open(__lowerCAmelCase , """w""" ) as meta_file:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
return cache_path
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = url.encode("""utf-8""" )
    SCREAMING_SNAKE_CASE__ : Optional[Any] = sha256(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = url_hash.hexdigest()
if etag:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = etag.encode("""utf-8""" )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = sha256(__lowerCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
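# Editor's note: a quick illustration of the cache-naming scheme above -- the
# filename is the SHA-256 hex digest of the URL, optionally suffixed with the
# digest of the ETag so new server-side versions get distinct cache entries.
# The URL and ETag values here are hypothetical.
from hashlib import sha256 as _demo_sha256

example_url = "https://cdn.huggingface.co/demo/pytorch_model.bin"
example_etag = '"abc123"'
cache_name = _demo_sha256(example_url.encode("utf-8")).hexdigest()
cache_name += "." + _demo_sha256(example_etag.encode("utf-8")).hexdigest()
print(cache_name)  # deterministic: the same URL + ETag always map to the same file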
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ) -> List[str]:
if cache_dir is None:
SCREAMING_SNAKE_CASE__ : str = TRANSFORMERS_CACHE
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Tuple = str(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : int = str(__lowerCAmelCase )
if is_remote_url(__lowerCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
SCREAMING_SNAKE_CASE__ : str = get_from_cache(
__lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , user_agent=__lowerCAmelCase , local_files_only=__lowerCAmelCase , )
elif os.path.exists(__lowerCAmelCase ):
# File, and it exists.
SCREAMING_SNAKE_CASE__ : Optional[Any] = url_or_filename
elif urlparse(__lowerCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCAmelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCAmelCase ) and not tarfile.is_tarfile(__lowerCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = os.path.split(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = output_file.replace(""".""" , """-""" ) + """-extracted"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isdir(__lowerCAmelCase ) and os.listdir(__lowerCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
SCREAMING_SNAKE_CASE__ : Tuple = output_path + """.lock"""
with FileLock(__lowerCAmelCase ):
shutil.rmtree(__lowerCAmelCase , ignore_errors=__lowerCAmelCase )
os.makedirs(__lowerCAmelCase )
if is_zipfile(__lowerCAmelCase ):
with ZipFile(__lowerCAmelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Tuple = tarfile.open(__lowerCAmelCase )
tar_file.extractall(__lowerCAmelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCAmelCase ) )
return output_path_extracted
return output_path
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase="," ) -> Optional[Any]:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = eval(f.read() )
else:
SCREAMING_SNAKE_CASE__ : str = requests.get(__lowerCAmelCase )
try:
            SCREAMING_SNAKE_CASE__ : Dict = req.json()
except Exception:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = req.content.decode()
assert data is not None, "could not connect"
try:
SCREAMING_SNAKE_CASE__ : Dict = eval(__lowerCAmelCase )
except Exception:
SCREAMING_SNAKE_CASE__ : Dict = data.split("""\n""" )
req.close()
return data
def _lowercase ( url ) -> List[str]:
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def _lowercase ( url ) -> int:
    fn = url.split("""/""" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , """rb""" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("""model""" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace("""running_var""" , """num_batches_tracked""" )
            new[k2] = zero
    return new
def _lowercase ( ) -> Optional[Any]:
    print(F'''{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb''' )
def _lowercase ( im , input_format="RGB" ) -> Union[str, Any]:
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, F'''could not connect to: {im}'''
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def _lowercase ( images , batch=1 ) -> str:
return (images[i : i + batch] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ))
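# Editor's note: the generator above is a plain fixed-stride batcher; a minimal
# usage sketch with a list of ints standing in for loaded images.
def batch_items(items, batch=2):
    return (items[i : i + batch] for i in range(0, len(items), batch))

for chunk in batch_items([1, 2, 3, 4, 5], batch=2):
    print(chunk)  # [1, 2] then [3, 4] then [5]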
| 12
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = (DDPMScheduler,)
def _a ( self , **_a ) -> List[str]:
"""simple docstring"""
        config = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_a )
return config
def _a ( self ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def _a ( self ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def _a ( self ) -> Any:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def _a ( self ) -> int:
"""simple docstring"""
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def _a ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def _a ( self ) -> str:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Any = len(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : str = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , _a )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ : int = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE__ : str = pred_prev_sample
SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Dict = len(_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ : int = model(_a , _a )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE__ : Tuple = pred_prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = -1
else:
SCREAMING_SNAKE_CASE__ : Tuple = timesteps[i + 1]
SCREAMING_SNAKE_CASE__ : int = scheduler.previous_timestep(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prev_t.item()
self.assertEqual(_a , _a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_a , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : int = [100, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE__ : List[str] = len(_a )
with self.assertRaises(_a , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _a , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_a )
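# Editor's note: a minimal end-to-end sketch of the sampling loop the tests
# above exercise, assuming only `torch` and `diffusers` are installed. The
# "model" is a hypothetical stand-in that predicts zero noise; a real pipeline
# would call a trained UNet here.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8, generator=generator)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # hypothetical model output
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])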
| 12
| 1
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class snake_case ( unittest.TestCase ):
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = logging.get_logger()
# the current default level is logging.WARNING
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a_ )
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_verbosity()
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE__ : Any = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(a_ )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
SCREAMING_SNAKE_CASE__ : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE__ : int = os.getenv('TRANSFORMERS_VERBOSITY' , a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.log_levels[env_level_str]
SCREAMING_SNAKE_CASE__ : str = logging.get_verbosity()
self.assertEqual(
a_ , a_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
SCREAMING_SNAKE_CASE__ : List[Any] = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE__ : int = logging.logging.getLogger()
with CaptureLogger(a_ ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger('transformers.models.bart.tokenization_bart' )
SCREAMING_SNAKE_CASE__ : Dict = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(a_ ) as cl:
logger.warning_advice(a_ )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a_ ) as cl:
logger.warning_advice(a_ )
self.assertEqual(cl.out , msg + '\n' )
def _a ( ):
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
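# Editor's note: a minimal sketch of the verbosity API exercised by the tests
# above, assuming `transformers` is installed; the level applies to every
# `transformers.*` logger at once.
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_error()    # suppress warnings library-wide
logger = hf_logging.get_logger("transformers.models.bart.tokenization_bart")
logger.warning("hidden")            # dropped at ERROR verbosity
hf_logging.set_verbosity_warning()
logger.warning("shown")             # emitted again at WARNING verbosity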
| 85
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE__ : Any = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = ["DPTFeatureExtractor"]
SCREAMING_SNAKE_CASE__ : Tuple = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
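# Editor's note: `_LazyModule` defers the heavy imports above until first
# attribute access. A generic sketch of the same idea using PEP 562's
# module-level __getattr__; the name-to-submodule map here is illustrative,
# not the real import structure.
import importlib

_LAZY_ATTRS = {"DPTConfig": ".configuration_dpt", "DPTModel": ".modeling_dpt"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")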
| 85
| 1
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')
@dataclass(frozen=True , slots=True )
class _Item( Generic[KEY, VAL] ):
    key: KEY
    val: VAL
class _DeletedItem( _Item ):
    def __init__( self : Any ):
        super().__init__(None , None )
    def __bool__( self : int ):
        return False
_deleted = _DeletedItem()
class HashMap( MutableMapping[KEY, VAL] ):
    def __init__( self : int , initial_block_size : int = 8 , capacity_factor : float = 0.75 ):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self : Any , __snake_case : KEY ):
return hash(__snake_case ) % len(self._buckets )
    def _get_next_ind( self : Optional[Any] , ind : int ):
return (ind + 1) % len(self._buckets )
    def _try_set( self : Union[str, Any] , ind : int , key : KEY , val : VAL ):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
return True
else:
return False
    def _is_full( self : Union[str, Any] ):
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self : Dict ):
if len(self._buckets ) <= self._initial_block_size:
return False
        limit = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
    def _resize( self : Optional[Any] , new_size : int ):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
    def _size_up( self : Tuple ):
self._resize(len(self._buckets ) * 2 )
    def _size_down( self : Dict ):
self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self : List[str] , __snake_case : KEY ):
        ind = self._get_bucket_index(__snake_case )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self : Any , key : KEY , val : VAL ):
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
break
    def __setitem__( self : str , key : KEY , val : VAL ):
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self : str , key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
    def __getitem__( self : Optional[int] , key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
def __len__( self : Tuple ):
return self._len
def __iter__( self : Union[str, Any] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
        val_string = ' ,'.join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})"""
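# Editor's note: a brief usage sketch of the map above; deletions leave a
# `_deleted` tombstone so probe chains stay intact, and the table resizes
# itself once the load factor crosses `capacity_factor`.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=8 )
    for key in range(6 ):
        hm[key] = key * 10          # inserts trigger _size_up past 0.75 load
    del hm[3]                       # tombstoned, not blanked
    print(len(hm ), hm[4] )         # 5 40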
| 195
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase: Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def lowerCamelCase__ ( _A , _A , _A = 1_6000 ):
a : int = int(round(sample_rate * max_length ) )
if len(_A ) <= sample_length:
return wav
a : List[str] = randint(0 , len(_A ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
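# Editor's note: a self-contained restatement of the random cropper above (its
# definition is name-mangled here, but it is called as `random_subsample`
# further down), plus a quick check of the length arithmetic at 16 kHz.
from random import randint as _demo_randint

import numpy as np

def random_subsample_demo(wav, max_length, sample_rate=16000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    offset = _demo_randint(0, len(wav) - sample_length - 1)
    return wav[offset : offset + sample_length]

clip = random_subsample_demo(np.zeros(48000), max_length=2.0)  # 3 s in, 2 s cap
assert len(clip) == 32000  # 2 s * 16000 Hz survive the crop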
@dataclass
class a__:
lowercase__ = field(default=lowerCamelCase__ , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """A file containing the training audio paths and labels."""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
lowercase__ = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
lowercase__ = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
lowercase__ = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
lowercase__ = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
lowercase__ = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class a__:
lowercase__ = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
lowercase__ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowercase_ ( self : Optional[Any] ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder` '
'instead. Setting `freeze_feature_encoder==True`.' , __snake_case , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
                'should not be used in combination with `--freeze_feature_encoder`. '
'Only make use of `--freeze_feature_encoder`.' )
def lowerCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , _A , _A )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(_A )
transformers.utils.logging.set_verbosity(_A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
a : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
a : Any = DatasetDict()
a : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
a : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
a : int = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
a : int = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
a : Dict = feature_extractor.model_input_names[0]
def train_transforms(_A ):
a : Dict = []
for audio in batch[data_args.audio_column_name]:
a : int = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_A )
a : str = feature_extractor(_A , sampling_rate=feature_extractor.sampling_rate )
a : Dict = {model_input_name: inputs.get(_A )}
a : str = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_A ):
a : List[str] = [audio['array'] for audio in batch[data_args.audio_column_name]]
a : Dict = feature_extractor(_A , sampling_rate=feature_extractor.sampling_rate )
a : int = {model_input_name: inputs.get(_A )}
a : Tuple = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
a : List[str] = raw_datasets['train'].features[data_args.label_column_name].names
a , a : Optional[Any] = {}, {}
for i, label in enumerate(_A ):
a : int = str(_A )
a : List[Any] = label
# Load the accuracy metric from the datasets package
a : int = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_A ):
a : Dict = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_A , references=eval_pred.label_ids )
a : int = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(_A ) , label2id=_A , id2label=_A , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a : Any = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
a : int = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_A , output_all_columns=_A )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
a : List[str] = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_A , output_all_columns=_A )
# Initialize our trainer
a : List[Any] = Trainer(
model=_A , args=_A , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=_A , tokenizer=_A , )
# Training
if training_args.do_train:
a : Dict = None
if training_args.resume_from_checkpoint is not None:
a : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a : int = last_checkpoint
a : Optional[Any] = trainer.train(resume_from_checkpoint=_A )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
a : List[Any] = trainer.evaluate()
trainer.log_metrics('eval' , _A )
trainer.save_metrics('eval' , _A )
# Write model card and (optionally) push to hub
a : Any = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_A )
else:
trainer.create_model_card(**_A )
if __name__ == "__main__":
main()
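# Editor's note: a tiny check of the accuracy-metric wiring above, assuming the
# `evaluate` package is installed; predictions are argmax'ed over the logit axis.
import evaluate
import numpy as np

metric = evaluate.load("accuracy")
logits = np.array([[0.1, 0.9], [0.8, 0.2]])
preds = np.argmax(logits, axis=1)  # -> [1, 0]
print(metric.compute(predictions=preds, references=[1, 1]))  # {'accuracy': 0.5}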
| 195
| 1
|
import math
class SelfOrganizingMap :
'''simple docstring'''
    def get_winner( self , weights , sample ):
        """simple docstring"""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d0 > d1 else 1
    def update( self , weights , sample , j , alpha ):
        """simple docstring"""
        for i in range(len(sample ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def main():
    '''simple docstring'''
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(f'''Clusters that the test sample belongs to : {winner}''' )
    print(f'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
    main()
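# Editor's note: a quick sanity check of the Kohonen update rule used above --
# one update pulls the winning weight vector a fraction `alpha` toward the sample.
sample = [1, 1, 0, 0]
weight = [0.2, 0.6, 0.5, 0.9]
alpha = 0.5
updated = [w + alpha * (s - w) for s, w in zip(sample, weight)]
print(updated)  # [0.6, 0.8, 0.25, 0.45] -- halfway between weight and sample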
| 558
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
_UpperCAmelCase = 50003
_UpperCAmelCase = 50002
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = PLBartTokenizer
lowerCamelCase_ = None
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A_ : str = PLBartTokenizer(lowercase , language_codes='base' , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = PLBartTokenizer(lowercase , language_codes='base' , keep_accents=lowercase )
A_ : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
A_ : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A_ : List[Any] = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
A_ : Tuple = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        vocab_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(vocab_tokens , ['__java__', '__python__', '__en_XX__', '<mask>'] )
A_ : Dict = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
A_ : Optional[Any] = tokenizer(lowercase ).input_ids
self.assertEqual(
tokenizer.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) , lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = PLBartTokenizer(lowercase , language_codes='multi' , keep_accents=lowercase )
A_ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
A_ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A_ : int = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
A_ : Tuple = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        vocab_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            vocab_tokens , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
A_ : str = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
A_ : Dict = tokenizer(lowercase ).input_ids
self.assertEqual(
tokenizer.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) , lowercase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = '''uclanlp/plbart-python-en_XX'''
lowerCamelCase_ = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
lowerCamelCase_ = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
lowerCamelCase_ = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase_ ( cls ):
"""simple docstring"""
A_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
A_ : Tuple = 1
return cls
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0_0_0_3 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertIn(lowercase , self.tokenizer.all_special_ids )
A_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
A_ : Tuple = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase )
A_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase )
self.assertEqual(lowercase , lowercase )
self.assertNotIn(self.tokenizer.eos_token , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 2_0]
self.assertIsInstance(src_text[0] , lowercase )
A_ : int = 1_0
A_ : Dict = self.tokenizer(lowercase , max_length=lowercase , truncation=lowercase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase )
self.assertEqual(len(lowercase ) , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname )
new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
batch = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt' )
targets = self.tokenizer(
text_target=self.tgt_text , padding=True , truncation=True , max_length=1_0 , return_tensors='pt' )
labels = targets['input_ids']
batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
inputs = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
self.assertEqual(
nested_simplify(inputs ) , {
# A, test, EOS, en_XX
'input_ids': [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_0_0_0_1,
} , )
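# A hedged usage sketch of the convention the assertions above rely on (the
# checkpoint name and behaviour are taken from this test, not re-verified):
# PLBart appends the language code *after* EOS instead of prefixing it.
# >>> tok = PLBartTokenizer.from_pretrained('uclanlp/plbart-python-en_XX', src_lang='python', tgt_lang='en_XX')
# >>> tok('def f(): pass').input_ids[-2:]  # [eos_id(=2), __python__ id]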
| 558
| 1
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__lowerCamelCase : str = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
__lowerCamelCase : Tuple = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
__lowerCamelCase : Optional[int] = '''
Calculates how well the predicted code candidates solve their reference tests, reported as pass@k rates
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds each candidate program is allowed to run (Default: 3.0).
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unit test
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
__lowerCamelCase : List[Any] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
__lowerCamelCase : str = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _compute( self , predictions , references , k=[1, 10, 1_00] , num_workers=4 , timeout=3.0 ):
"""simple docstring"""
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=num_workers ) as executor:
futures = []
completion_id = Counter()
n_samples = 0
results = defaultdict(list )
for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
for candidate in candidates:
# each candidate is executed together with its test case in a worker thread
test_program = candidate + """\n""" + test_case
args = (test_program, timeout, task_id, completion_id[task_id])
future = executor.submit(check_correctness , *args )
futures.append(future )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(futures ):
result = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
total, correct = [], []
for result in results.values():
result.sort()
passed = [r[1]["""passed"""] for r in result]
total.append(len(passed ) )
correct.append(sum(passed ) )
total = np.array(total )
correct = np.array(correct )
ks = k
# pass@k is only defined when every task has at least k samples
pass_at_k = {f"""pass@{k}""": estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ):
"""Estimates pass@k of each problem and returns them in an array."""
def estimator(n: int , c: int , k: int ) -> float:
"""Calculates 1 - comb(n - c, k) / comb(n, k) in a numerically stable product form."""
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(num_samples , int ):
num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
else:
assert len(num_samples ) == len(num_correct )
num_samples_it = iter(num_samples )
return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
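# Quick sanity check of the estimator above (illustrative values, not part of
# the metric's test suite): with n=5 samples per task and c=[2, 0] correct
# completions, pass@1 reduces to c/n per task.
# >>> estimate_pass_at_k(5, [2, 0], 1)
# array([0.4, 0. ])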
| 379
|
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __snake_case ( TestCase ):
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(ValueError ):
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __a ( self : int ):
"""simple docstring"""
with self.assertRaises(ValueError ):
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __a ( self : Dict ):
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __a ( self : List[str] ):
"""simple docstring"""
import PIL.Image
pil_image = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , kwargs )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _check_output( output , expected_num_chunks: int ):
"""simple docstring"""
stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
f = pa.ipc.open_stream(stream )
pa_table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__UpperCamelCase , features=__UpperCamelCase ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE__ = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ = pa.ipc.open_stream(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = f.read_all()
SCREAMING_SNAKE_CASE__ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__UpperCamelCase )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
with pytest.raises(__UpperCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
with pytest.raises(__UpperCamelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="""split_name""" , check_duplicates=__UpperCamelCase , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
SCREAMING_SNAKE_CASE__ = os.path.join(__UpperCamelCase , """test.arrow""" )
with ArrowWriter(path=__UpperCamelCase , schema=pa.schema(__UpperCamelCase ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(__UpperCamelCase , 1 )
def get_base_dtype( arr_type ):
"""simple docstring"""
if pa.types.is_list(arr_type ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def change_first_primitive_element_in_list( lst , value ):
"""simple docstring"""
if isinstance(lst[0] , list ):
change_first_primitive_element_in_list(lst[0] , value )
else:
lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.array(TypedSequence(__UpperCamelCase , optimized_int_type=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence( sequence , col , expected_dtype ):
"""simple docstring"""
arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
sequence = copy.deepcopy(sequence )
not_in_range = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(sequence , not_in_range )
arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
assert get_base_dtype(arr.type ) == pa.intaa()
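# The overflow branch above is the key behaviour: writing a value one past the
# optimized dtype's maximum (e.g. 128 for an int8-optimized column such as
# attention_mask) makes OptimizedTypedSequence fall back to the wider default
# integer type instead of failing.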
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__UpperCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """mock://dataset-train.arrow"""
with ArrowWriter(path=__UpperCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__UpperCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ParquetWriter(stream=__UpperCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE__ = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ = pq.read_table(__UpperCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
import PIL.Image
SCREAMING_SNAKE_CASE__ = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__UpperCamelCase , format="""png""" )
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ParquetWriter(
stream=__UpperCamelCase , features=Features({"""image""": Image()} ) , embed_local_files=__UpperCamelCase ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE__ = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ = pq.read_table(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __UpperCamelCase )
with open(__UpperCamelCase , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__UpperCamelCase )] )
SCREAMING_SNAKE_CASE__ = pa.BufferOutputStream()
with ArrowWriter(stream=__UpperCamelCase ) as writer:
writer._build_writer(inferred_schema=__UpperCamelCase )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
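# Minimal end-to-end sketch of the ArrowWriter pattern exercised throughout
# these tests (in-memory stream, two rows, then finalize):
# >>> stream = pa.BufferOutputStream()
# >>> with ArrowWriter(stream=stream) as writer:
# ...     writer.write({"col_1": "foo", "col_2": 1})
# ...     writer.write({"col_1": "bar", "col_2": 2})
# ...     num_examples, num_bytes = writer.finalize()
# >>> num_examples
# 2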
| 379
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : str = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
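# With this lazy-import pattern, importing the package stays cheap: _LazyModule
# resolves a name to its submodule only on first attribute access, so the
# torch-dependent modeling code is not imported until a model class is used.
# >>> from transformers.models import altclip
# >>> altclip.AltCLIPModel  # only now is modeling_altclip (and torch) imported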
| 12
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowercase_ : Dict = logging.get_logger(__name__)
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *args , **kwargs ) -> None:
'''simple docstring'''
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs )
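# Usage note: instantiating the deprecated class still works but fires the
# FutureWarning above; new code should construct the replacement directly.
# >>> from transformers import CLIPImageProcessor
# >>> image_processor = CLIPImageProcessor()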
| 653
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
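# Sanity check of the shape arithmetic above for the default tester config
# (image_size=224): 224 // 4 = 56, halved after every two hidden states, so
# the 8 feature maps have widths 56, 56, 28, 28, 14, 14, 7, 7.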
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(config ):
configs_no_init = copy.deepcopy(config )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(configs_no_init , key , 1E-10 )
if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
setattr(configs_no_init , key , no_init_subconfig )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 653
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Any=13 , __lowercase : Optional[int]=7 , __lowercase : str=True , __lowercase : Optional[Any]=True , __lowercase : int=True , __lowercase : int=True , __lowercase : List[str]=99 , __lowercase : int=32 , __lowercase : int=5 , __lowercase : Tuple=4 , __lowercase : str=37 , __lowercase : Optional[int]="gelu" , __lowercase : Tuple=0.1 , __lowercase : str=0.1 , __lowercase : Dict=512 , __lowercase : List[Any]=16 , __lowercase : Dict=2 , __lowercase : Union[str, Any]=0.02 , __lowercase : Dict=4 , ) -> int:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : str = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Tuple = num_choices
def UpperCAmelCase ( self : Dict ) -> Tuple:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[Any] = None
if self.use_attention_mask:
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self : Any ) -> List[str]:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : int = True
__UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = True
a : List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def UpperCAmelCase ( self : str ) -> List[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class_name.from_pretrained("""roberta-base""" , from_pt=__lowercase )
__UpperCAmelCase : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowercase )
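# The slow test above exercises the PyTorch -> Flax conversion path:
# from_pt=True loads the torch checkpoint for "roberta-base" and converts its
# state dict into Flax parameters before the dummy forward pass.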
| 63
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] = GPTSanJapaneseTokenizer
a : Optional[Any] = False
a : List[str] = {'do_clean_text': False, 'add_prefix_space': False}
def UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
__UpperCAmelCase : Tuple = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
__UpperCAmelCase : Dict = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowercase ) )
def get_tokenizer( self , **kwargs ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ) -> Any:
input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def get_clean_sequence( self , tokenizer ) -> List[Any]:
input_text, output_text = self.get_input_output_texts(tokenizer )
ids = tokenizer.encode(output_text , add_special_tokens=False )
text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def UpperCAmelCase ( self : int ) -> Optional[Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Tuple:
pass # TODO add if relevant
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : List[str] = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。 こんばんは、㔺界。"""
__UpperCAmelCase : Dict = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids without special tokens
__UpperCAmelCase : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# Testing conversion to ids with special tokens
__UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
__UpperCAmelCase : int = self.get_tokenizer()
# Testing tokenization
__UpperCAmelCase : Tuple = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
__UpperCAmelCase : int = """こんにちは、、、、世界。こんばんは、、、、世界。"""
__UpperCAmelCase : Tuple = tokenizer.encode(__lowercase )
__UpperCAmelCase : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : int ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : List[Any] = """こんにちは、世界。"""
__UpperCAmelCase : Optional[int] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : List[Any] = """こんにちは、世界。こんばんは、世界。😀"""
__UpperCAmelCase : List[str] = tokenizer.encode(prefix_text + input_text )
__UpperCAmelCase : List[Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
__UpperCAmelCase : Any = tokenizer.encode(__lowercase , prefix_text=__lowercase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowercase )
__UpperCAmelCase : Any = tokenizer.decode(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : Any ) -> str:
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
__UpperCAmelCase : int = """こんにちは、世界。"""
__UpperCAmelCase : List[Any] = """こんばんは、㔺界。😀"""
__UpperCAmelCase : Union[str, Any] = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : int = len(tokenizer.encode(__lowercase ) ) - 2
__UpperCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__UpperCAmelCase : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
__UpperCAmelCase : List[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCAmelCase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Optional[Any] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCAmelCase : Tuple = tokenizer(__lowercase , prefix_text=__lowercase ).token_type_ids
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCAmelCase ( self : List[str] ) -> int:
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""あンいワ""" )
__UpperCAmelCase : Tuple = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertEqual(tokenizer.decode(__lowercase ) , tokenizer.decode(__lowercase ) )
self.assertNotEqual(__lowercase , __lowercase )
self.assertNotEqual(__lowercase , __lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
__UpperCAmelCase : List[Any] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
__UpperCAmelCase : int = tokenizer(__lowercase , padding=__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(__lowercase , padding=__lowercase )
# fmt: off
__UpperCAmelCase : Optional[int] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
__UpperCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCAmelCase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowercase )
self.assertListEqual(x_token.token_type_ids , __lowercase )
self.assertListEqual(x_token.attention_mask , __lowercase )
self.assertListEqual(x_token_a.input_ids , __lowercase )
self.assertListEqual(x_token_a.token_type_ids , __lowercase )
self.assertListEqual(x_token_a.attention_mask , __lowercase )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCAmelCase ( self : Any ) -> int:
# tokenizer has no padding token
pass
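# The token_type_ids assertions above encode GPTSAN's prefix-LM convention:
# the prefix span (plus the leading separator token) is marked with 1s and the
# free-running text span with 0s, so the prefix attention mask can be
# reconstructed from the encoding alone.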
| 63
| 1
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class A_ ( datasets.BuilderConfig ):
_lowerCamelCase : Optional[datasets.Features] = None
class A_ ( datasets.ArrowBasedBuilder ):
BUILDER_CONFIG_CLASS = PandasConfig
def _info( self ):
return datasets.DatasetInfo(features=self.config.features )
def _split_generators( self , dl_manager ):
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
files = data_files
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
splits = []
for split_name, files in data_files.items():
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
return splits
def _cast_table( self , pa_table: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
def _generate_tables( self , files ):
for i, file in enumerate(itertools.chain.from_iterable(files ) ):
with open(file , "rb" ) as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
yield i, self._cast_table(pa_table )
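# Illustrative use of this builder through the datasets API (the file path is
# hypothetical); each pickled pandas DataFrame becomes one Arrow table:
# >>> from datasets import load_dataset
# >>> ds = load_dataset("pandas", data_files={"train": "data/train.pkl"})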
| 717
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A_ ( unittest.TestCase ):
def lowercase ( self : List[str] ):
x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
torch_builtin = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def lowercase ( self : int ):
x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
torch_builtin = get_activation("gelu" )
gelu_10 = get_activation("gelu_10" )
y_gelu = torch_builtin(x )
y_gelu_10 = gelu_10(x )
clipped_mask = torch.where(y_gelu_10 < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(y_gelu_10 ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
def lowercase ( self : Any ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(snake_case_ ):
get_activation("bogus" )
with self.assertRaises(snake_case_ ):
get_activation(snake_case_ )
def lowercase ( self : Dict ):
_UpperCAmelCase = get_activation("gelu" )
_UpperCAmelCase = 1
_UpperCAmelCase = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(snake_case_ ):
_UpperCAmelCase = acta.a
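# Quick demonstration (illustrative, not part of the test suite): gelu_10 matches
# gelu below the clip point but saturates at 10.0 for large inputs.
#
#   x = torch.tensor([1.0, 5.0, 50.0])
#   get_activation("gelu")(x)     # tensor([ 0.8413,  5.0000, 50.0000])
#   get_activation("gelu_10")(x)  # same, except the last value is clipped to 10.0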
| 119
| 0
|
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 24
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # token inputs are int32, labels int64
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
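# Usage note (added for clarity; the file names below are hypothetical): each CSV
# must share a header whose column at `label_column_id` holds the label, leaving
# one or two text columns, e.g.
#   train_ds, val_ds, _, label2id = get_tfds(
#       "train.csv", "dev.csv", None, tokenizer, label_column_id=0, max_seq_length=128
#   )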
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)

    return results
if __name__ == "__main__":
main()
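# Example invocation (illustrative; file names and model are placeholders):
#   python run_tf_text_classification.py \
#       --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#       --model_name_or_path bert-base-uncased --output_dir ./out \
#       --do_train --do_eval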
| 324
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
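# Usage sketch (illustrative; `postprocess_qa_predictions` and the dataset names
# are placeholders for whatever the surrounding pipeline provides):
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=postprocess_qa_predictions,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()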
| 712
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
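# Behavior note (added for clarity): the lazy module defers the heavy submodule
# imports until an attribute is first accessed, e.g.
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig
# only imports `configuration_gpt_neox_japanese` at that point.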
| 583
| 0
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
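# Note (added for clarity): lbp.local_binary_value thresholds the 8 neighbours of
# each pixel against the centre value and packs the comparisons into one byte,
# so lbp_image holds integer codes in the range [0, 255].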
| 526
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a list of integers sorted in ascending order, return the indices of
    the two numbers that add up to `target` using the two-pointer technique.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 481
| 0
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 65_536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 65_536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48_000,
'''sample_size''': 131_072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16_000,
'''sample_size''': 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
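# Math note (added for clarity): for alpha = cos(t * pi / 2) and
# sigma = sin(t * pi / 2), alpha_sigma_to_t recovers t = atan2(sigma, alpha) * 2 / pi.
# get_crash_schedule warps the linear schedule through sigma' = sin(t * pi / 2) ** 2
# and alpha' = sqrt(1 - sigma' ** 2) before mapping back to a timestep.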
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(diffusion, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
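# Example invocation (illustrative; paths are placeholders):
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers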
| 717
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
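# Note (added for clarity): LayoutLM bounding boxes are (x0, y0, x1, y1) pairs
# normalized to a 0-1000 grid; the (1_000, 1_000, 1_000, 1_000) entries above are
# the convention used for the [SEP] token.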
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
@slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)
@slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)
@slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 677
| 0
|
from __future__ import annotations
class Matrix:
    """
    Matrix object generated from a 2D array where each element is an array
    representing a row.
    """

    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
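
    # Illustrative usage sketch (an assumption: the Matrix constructor defined
    # above this excerpt accepts a list of rows, as the methods here do when
    # they call Matrix(...)):
    example = Matrix([[1, 2], [3, 4]])
    print(example.determinant())  # 1 * 4 - 2 * 3 == -2
    print(example ** 2)  # the matrix product of example with itself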
| 401
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
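
        # How the expected ids line up with the toy vocab in setUp: "l" -> 0,
        # "o" -> 1, "w" -> 2, "er" -> 15, "\u0120" -> 10, "n" -> 9, "e" -> 3,
        # and "<unk>" -> 19, giving the [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] asserted above.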
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 108
| 0
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
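

# Minimal sketch of a concrete reader built on the abstract base above. The class
# below is hypothetical and only illustrates the subclassing pattern: store the
# constructor arguments, then implement read().
class InMemoryListReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        # Reuses `path_or_paths` as an already-loaded list of dicts, purely for
        # illustration; a real reader would load from disk here.
        return Dataset.from_list(self.path_or_paths)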
| 717
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
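
# Usage sketch (illustrative; this mirrors the CLIPSeg processor API in
# `transformers`, and the checkpoint below is one published example):
#
#     from transformers import AutoProcessor
#     processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")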
| 243
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
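
# Worked example (illustrative): for the straight line f(x) = x on [0, 1], every
# chord lies on the curve itself, so the piecewise-linear approximation is exact
# for any step count and returns sqrt(2):
#
#     line_length(lambda x: x, 0, 1, 10)  # ~1.4142135623730951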
if __name__ == "__main__":
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
lowerCamelCase__ = 10
while i <= 10_0000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 624
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
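
# For example, per the branches above, the "small" checkpoint maps to
# hidden_size=1024, 24 hidden layers, 16 attention heads, and ffn_dim = 4 * 1024 = 4096.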
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
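
# Example invocation (illustrative; the script name and output path are assumptions):
#
#     python convert_musicgen_transformers.py --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small --device cpu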
| 624
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
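

# Usage sketch (illustrative), composing a Blip2Config from sub-configs via the
# classmethod above; all values are this module's defaults:
#
#     vision_config = Blip2VisionConfig()
#     qformer_config = Blip2QFormerConfig()
#     text_config = CONFIG_MAPPING["opt"]()
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config, qformer_config, text_config
#     )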
| 75
|
from collections import deque
from math import floor
from random import random
from time import time
class _SCREAMING_SNAKE_CASE :
def __init__( self )-> List[str]:
lowerCamelCase_ ={}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[Any]:
if self.graph.get(_SCREAMING_SNAKE_CASE ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCamelCase_ =[[w, v]]
if not self.graph.get(_SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =[]
def _snake_case ( self )-> str:
return list(self.graph )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict:
if self.graph.get(_SCREAMING_SNAKE_CASE ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> Optional[Any]:
if s == d:
return []
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_SCREAMING_SNAKE_CASE )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]:
if c == -1:
lowerCamelCase_ =floor(random() * 1_0000 ) + 10
for i in range(_SCREAMING_SNAKE_CASE ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase_ =floor(random() * c ) + 1
if n != i:
self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Any:
lowerCamelCase_ =deque()
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
d.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
while d:
lowerCamelCase_ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[Any]:
lowerCamelCase_ =0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
return len(self.graph[u] )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Union[str, Any]:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =[]
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return sorted_nodes
def _snake_case ( self )-> str:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return list(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return False
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> List[str]:
lowerCamelCase_ =time()
self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]:
lowerCamelCase_ =time()
self.bfs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
class _SCREAMING_SNAKE_CASE :
def __init__( self )-> Optional[Any]:
lowerCamelCase_ ={}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )-> List[str]:
# check if the u exists
if self.graph.get(_SCREAMING_SNAKE_CASE ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCamelCase_ =[[w, v]]
# add the other way
if self.graph.get(_SCREAMING_SNAKE_CASE ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
lowerCamelCase_ =[[w, u]]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple:
if self.graph.get(_SCREAMING_SNAKE_CASE ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_SCREAMING_SNAKE_CASE )
# the other way round
if self.graph.get(_SCREAMING_SNAKE_CASE ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> int:
if s == d:
return []
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_SCREAMING_SNAKE_CASE )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )-> Optional[int]:
if c == -1:
lowerCamelCase_ =floor(random() * 1_0000 ) + 10
for i in range(_SCREAMING_SNAKE_CASE ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase_ =floor(random() * c ) + 1
if n != i:
self.add_pair(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]:
lowerCamelCase_ =deque()
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
d.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
while d:
lowerCamelCase_ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Union[str, Any]:
return len(self.graph[u] )
def _snake_case ( self )-> Any:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return list(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Any:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return False
def _snake_case ( self )-> Optional[Any]:
return list(self.graph )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> str:
lowerCamelCase_ =time()
self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> Dict:
lowerCamelCase_ =time()
self.bfs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
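

# Usage sketch (illustrative; assumes the two classes above are exposed as
# DirectedGraph and Graph, and that the obfuscated method names correspond to
# add_pair/dfs as the internal call sites such as self.add_pair and self.dfs suggest):
#
#     g = DirectedGraph()
#     g.add_pair(0, 1)
#     g.add_pair(1, 2)
#     g.dfs(0, 2)  # -> [0, 1, 2]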
| 75
| 1
|
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
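
# Example output (illustrative; the version numbers below are hypothetical): running
# `accelerate env` prints the `info` dict built above as a bullet list, e.g.
#
#     - `Accelerate` version: 0.21.0
#     - Platform: Linux-5.15.0-x86_64-with-glibc2.31
#     - Python version: 3.10.12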
| 639
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
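
        # Cross-check against the toy vocab in setUp: "low" -> 14, "er</w>" -> 15,
        # and "<unk>" -> 20, matching the [14, 15, 20] ids asserted above.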
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Tests the tokenizer when ftfy and spacy are available."""

    pass
| 96
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):

    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        self.assertIsNotNone(model )


@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase ):

    @unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 )
| 719
|
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12) )
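

# A hypothetical cross-check of the reduce-based scan above (not part of the
# original file): the same 13-digit windows, written with an explicit inner loop.
def solution_iterative(n: str = N) -> int:
    best = 0
    for i in range(len(n) - 12):
        product = 1
        for digit in n[i : i + 13]:
            product *= int(digit)
        best = max(best, product)
    return best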
if __name__ == "__main__":
print(F'''{solution() = }''')
| 152
| 0
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_input_output_texts(self , tokenizer ):
        input_text = """This is a test"""
        output_text = """This is a test"""
        return input_text, output_text

    def test_convert_token_and_id(self) -> None:
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(vocab_keys ) , 2000 )

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )

    def test_full_tokenizer(self) -> None:
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [465, 287, 265, 631, 842] )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        # fmt: off
        self.assertListEqual(
            tokens , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
        # fmt: on

    def test_fast_encode_decode(self) -> None:
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
        texts = ["""This is a test""", """I was born in 92000, and this is falsé."""]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
    def test_tokenizer_integration(self) -> None:
        sequences = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
__UpperCAmelCase ={"""input_ids""": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        # __UpperCAmelCase above holds the expected encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=sequences , )
| 68
|
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest ):

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }
        config.update(**kwargs)
        return config
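
    # Usage note (hypothetical example): any default above can be overridden per
    # test via kwargs, e.g. self.get_scheduler_config(beta_schedule="squaredcos_cap_v2").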
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1

        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="""v_prediction""")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 135
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase ):
    def setUp(self ):
        if self.framework == "pytorch":
            subprocess.run(
                F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''' )

    def create_estimator(self , instance_count=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"{self.env.base_job_name}-single" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def save_results_as_csv(self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )

    def test_glue(self ):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"{estimator.latest_training_job.name}.json" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
| 49
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '''<PIL.Image.Image image mode=RGB size=100x100 at''')


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread('''digital_image_processing/image_data/lena_small.jpg''', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg", ):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
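

# Invocation note: these are plain pytest test functions; a hypothetical run is
#     python -m pytest digital_image_processing/test_digital_image_processing.py -q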
| 49
| 1
|
'''simple docstring'''
from manim import *
class CheckpointOffloadAnimation(Scene ):  # hypothetical class name; the original was obfuscated away
    def construct(self ):  # manim Scenes must implement construct()
__lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
__lowerCAmelCase = Rectangle(height=0.2_5 , width=0.2_5 )
__lowerCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = Text("""CPU""" , font_size=24 )
__lowerCAmelCase = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = [mem.copy() for i in range(4 )]
__lowerCAmelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = Text("""GPU""" , font_size=24 )
__lowerCAmelCase = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = Text("""Model""" , font_size=24 )
__lowerCAmelCase = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
rect.set_stroke(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=SCREAMING_SNAKE_CASE__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=SCREAMING_SNAKE_CASE__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=SCREAMING_SNAKE_CASE__ , buff=0.0 )
self.add(SCREAMING_SNAKE_CASE__ )
model_cpu_arr.append(SCREAMING_SNAKE_CASE__ )
self.add(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = Text("""Loaded Checkpoint""" , font_size=24 )
__lowerCAmelCase = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = []
__lowerCAmelCase = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase = fill.copy().set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.7 )
target.move_to(SCREAMING_SNAKE_CASE__ )
ckpt_arr.append(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(SCREAMING_SNAKE_CASE__ )
self.add(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCAmelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
__lowerCAmelCase = [meta_mem.copy() for i in range(6 )]
__lowerCAmelCase = [meta_mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
__lowerCAmelCase = Text("""Disk""" , font_size=24 )
__lowerCAmelCase = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) , Write(SCREAMING_SNAKE_CASE__ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE__ , run_time=1 ) )
__lowerCAmelCase = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(SCREAMING_SNAKE_CASE__ , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE__ )
self.play(FadeOut(SCREAMING_SNAKE_CASE__ ) )
__lowerCAmelCase = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) )
self.play(
FadeOut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ) , )
self.wait()
| 427
|
'''simple docstring'''
def UpperCamelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
return "".join(chr(ord(snake_case_ ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
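    # Hypothetical spot check (the original doctest examples were stripped):
    assert UpperCamelCase_("hello world") == "HELLO WORLD"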
| 427
| 1
|
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self , vision_config , text_config ):
        pass

    def prepare_config_and_inputs(self ):
        pass

    def get_pretrained_model_and_inputs(self ):
        pass

    def assert_almost_equals(self , a : np.ndarray , b : np.ndarray , tol : float ):
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , f"""Difference between torch and flax is {diff} (>= {tol}).""" )

    def check_model_from_pretrained_configs(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = FlaxVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )

    def check_vision_text_dual_encoder_from_pretrained(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def check_save_load(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-3 )

    def check_vision_text_output_attention(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence(self , pt_model , fx_model , inputs_dict ):
        pt_model.to(torch_device )
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs ).to_tuple()
        fx_outputs = fx_model(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4e-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname )
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )
        fx_outputs_loaded = fx_model_loaded(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs_loaded ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4e-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname )
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )
        pt_model_loaded.to(torch_device )
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs_loaded ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4e-2 )

    def check_equivalence_pt_to_flax(self , vision_config , text_config , inputs_dict ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )

    def check_equivalence_flax_to_pt(self , vision_config , text_config , inputs_dict ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def test_model_from_pretrained_configs(self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )

    def test_vision_text_dual_encoder_from_pretrained(self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )

    def test_save_load(self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )

    def test_vision_text_output_attention(self ):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self ):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config" )
        text_config = config_inputs_dict.pop("text_config" )
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )

    @slow
    def test_real_model_save_load_from_pretrained(self ):
        model, inputs = self.get_pretrained_model_and_inputs()
        outputs = model(**inputs )
        out_1 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model(**inputs )
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self , vision_config , text_config ):
        vision_model = FlaxViTModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model

    def prepare_config_and_inputs(self ):
        vit_model_tester = FlaxViTModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self ):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self , vision_config , text_config ):
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model

    def prepare_config_and_inputs(self ):
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference(self ):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=image , padding=True , return_tensors="np" )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 ) )
| 717
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 640
| 0
|
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
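    # Hypothetical smoke checks (not from the original file); testmod() alone
    # verifies nothing here because the functions above define no doctests.
    assert multiplicative_persistence(217) == 2  # 217 -> 2*1*7=14 -> 1*4=4
    assert additive_persistence(199) == 3  # 199 -> 19 -> 10 -> 1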
| 101
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self ):
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1e-5 , )

    def create_and_check_model(self , config , pixel_values , labels ):
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )

    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self ):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self ):
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )

    def test_config(self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        pass

    def test_model_common_attributes(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason='SwiftFormer does not output attentions' )
    def test_attention_outputs(self ):
        pass

    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape ,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_initialization(self ):
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self ):
        pass
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self ):
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 101
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''
    destination_vertex: int
    weight: int
class AdjacencyList:
    '''simple docstring'''
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size
    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])
    @property
    def size(self) -> int:
        return self._size
    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''')
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
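# Minimal usage sketch (hedged: it relies on the class and method names as
# reconstructed above): the zero-weight detour 0 -> 1 -> 2 beats the direct
# weight-1 edge 0 -> 2, so the shortest distance is 0.
example_graph = AdjacencyList(3)
example_graph.add_edge(0, 1, 0)
example_graph.add_edge(1, 2, 0)
example_graph.add_edge(0, 2, 1)
assert example_graph.get_shortest_path(0, 2) == 0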
| 625
|
"""simple docstring"""
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 625
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        elif name.split('''.''' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
    return proj_weight
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path ):
    with open(dict_path , '''r''' , encoding='''utf-8''' ) as f:
        lines = f.readlines()
        words = [line.split(''' ''' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('''embed_out''' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) , '''w''' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''speech_to_text_2'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
    parser.add_argument('--vocab_size', default=10_224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
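# A hypothetical invocation of the conversion script above (the script file
# name and all paths are placeholders, not real checkpoints; the flags match
# the argparse definitions):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./converted-speech-encoder-decoder \
#       --vocab_size 10224 \
#       --num_decoder_layers 7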
| 49
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase ):
    def setUp(self ):
        self.block_size = 10
    def test_fit_to_block_sequence_too_small(self ):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_fit_exactly(self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_too_big(self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_process_story_no_highlights(self ):
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )
    def test_process_empty_story(self ):
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )
    def test_process_story_with_missing_period(self ):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story )
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines , summary_lines )
    def test_build_mask_no_padding(self ):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
    def test_build_mask(self ):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 23 ).numpy() , expected.numpy() )
    def test_build_mask_with_padding_equal_to_one(self ):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
    def test_compute_token_type_ids(self ):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
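# For reference, a minimal sketch of the truncate-or-pad behaviour these tests
# exercise (assumed semantics; the real helper is imported from
# utils_summarization above):
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert _truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]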
| 362
| 0
|
"""simple docstring"""
from manim import *
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = Rectangle(height=0.5 , width=0.5 )
A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A__ = [mem.copy() for i in range(6 )]
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*__a ).arrange(__a , buff=0 )
A__ = VGroup(*__a ).arrange(__a , buff=0 )
A__ = VGroup(__a , __a ).arrange(__a , buff=0 )
A__ = Text('CPU' , font_size=24 )
A__ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
A__ = [mem.copy() for i in range(1 )]
A__ = VGroup(*__a ).arrange(__a , buff=0 )
A__ = Text('GPU' , font_size=24 )
A__ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.align_to(__a , __a )
gpu.set_x(gpu.get_x() - 1 )
self.add(__a )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*__a ).arrange(__a , buff=0 )
A__ = Text('Model' , font_size=24 )
A__ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.play(
Create(__a , run_time=1 ) , Create(__a , run_time=1 ) , Create(__a , run_time=1 ) , )
A__ = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
A__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=2.5 ) , Write(__a ) , Write(__a ) )
self.add(__a )
A__ = []
A__ = []
A__ = []
for i, rect in enumerate(__a ):
A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
cpu_target.move_to(__a )
cpu_target.generate_target()
A__ = 0.46 / 4
A__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__a , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__a , buff=0.0 )
cpu_targs.append(__a )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__a ) )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
| 554
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
    args = parser.parse_args()
    logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token'] # `<s>`
        sep = tokenizer.special_tokens_map['sep_token'] # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
    logger.info(f'''Loading text from {args.file_path}''' )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()
    logger.info('Start encoding' )
    logger.info(f'''{len(data )} examples to process.''' )
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(f'''{len(data )} examples processed.''' )
    dp_file = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'''Dump to {dp_file}''' )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
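# A hypothetical invocation of the binarisation script above (paths are
# placeholders; the flags match the argparse definitions in main()):
#
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text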
| 554
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 502
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_SCREAMING_SNAKE_CASE = '''CompVis/stable-diffusion-v1-1'''
_SCREAMING_SNAKE_CASE = '''CompVis/stable-diffusion-v1-2'''
_SCREAMING_SNAKE_CASE = '''CompVis/stable-diffusion-v1-3'''
_SCREAMING_SNAKE_CASE = '''CompVis/stable-diffusion-v1-4'''
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = True ,) -> Dict:
'''simple docstring'''
        super().__init__()
__lowercase = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
__lowercase = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
__lowercase = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
__lowercase = StableDiffusionPipeline(
vae=_lowerCamelCase ,text_encoder=_lowerCamelCase ,tokenizer=_lowerCamelCase ,unet=_lowerCamelCase ,scheduler=_lowerCamelCase ,safety_checker=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,requires_safety_checker=_lowerCamelCase ,)
self.register_modules(pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea ,pipelinea=self.pipea )
@property
def _UpperCAmelCase (self ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self ,_lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
    def enable_attention_slicing(self ,slice_size = "auto" ) -> Optional[int]:
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing(self ) -> Any:
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 512 ,_lowerCamelCase = 512 ,_lowerCamelCase = 50 ,_lowerCamelCase = 7.5 ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = 0.0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,**_lowerCamelCase ,) -> int:
'''simple docstring'''
return self.pipea(
prompt=_lowerCamelCase ,height=_lowerCamelCase ,width=_lowerCamelCase ,num_inference_steps=_lowerCamelCase ,guidance_scale=_lowerCamelCase ,negative_prompt=_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,latents=_lowerCamelCase ,output_type=_lowerCamelCase ,return_dict=_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=_lowerCamelCase ,**_lowerCamelCase ,)
@torch.no_grad()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 512 ,_lowerCamelCase = 512 ,_lowerCamelCase = 50 ,_lowerCamelCase = 7.5 ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = 0.0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,**_lowerCamelCase ,) -> str:
'''simple docstring'''
return self.pipea(
prompt=_lowerCamelCase ,height=_lowerCamelCase ,width=_lowerCamelCase ,num_inference_steps=_lowerCamelCase ,guidance_scale=_lowerCamelCase ,negative_prompt=_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,latents=_lowerCamelCase ,output_type=_lowerCamelCase ,return_dict=_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=_lowerCamelCase ,**_lowerCamelCase ,)
@torch.no_grad()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 512 ,_lowerCamelCase = 512 ,_lowerCamelCase = 50 ,_lowerCamelCase = 7.5 ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = 0.0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,**_lowerCamelCase ,) -> int:
'''simple docstring'''
return self.pipea(
prompt=_lowerCamelCase ,height=_lowerCamelCase ,width=_lowerCamelCase ,num_inference_steps=_lowerCamelCase ,guidance_scale=_lowerCamelCase ,negative_prompt=_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,latents=_lowerCamelCase ,output_type=_lowerCamelCase ,return_dict=_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=_lowerCamelCase ,**_lowerCamelCase ,)
@torch.no_grad()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 512 ,_lowerCamelCase = 512 ,_lowerCamelCase = 50 ,_lowerCamelCase = 7.5 ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = 0.0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,**_lowerCamelCase ,) -> Tuple:
'''simple docstring'''
return self.pipea(
prompt=_lowerCamelCase ,height=_lowerCamelCase ,width=_lowerCamelCase ,num_inference_steps=_lowerCamelCase ,guidance_scale=_lowerCamelCase ,negative_prompt=_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,latents=_lowerCamelCase ,output_type=_lowerCamelCase ,return_dict=_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=_lowerCamelCase ,**_lowerCamelCase ,)
@torch.no_grad()
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = 512 ,_lowerCamelCase = 512 ,_lowerCamelCase = 50 ,_lowerCamelCase = 7.5 ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,_lowerCamelCase = 0.0 ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = "pil" ,_lowerCamelCase = True ,_lowerCamelCase = None ,_lowerCamelCase = 1 ,**_lowerCamelCase ,) -> Optional[int]:
'''simple docstring'''
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowercase = self.textaimg_sda_a(
prompt=_lowerCamelCase ,height=_lowerCamelCase ,width=_lowerCamelCase ,num_inference_steps=_lowerCamelCase ,guidance_scale=_lowerCamelCase ,negative_prompt=_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,latents=_lowerCamelCase ,output_type=_lowerCamelCase ,return_dict=_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=_lowerCamelCase ,**_lowerCamelCase ,)
# Get first result from Stable Diffusion Checkpoint v1.2
__lowercase = self.textaimg_sda_a(
prompt=_lowerCamelCase ,height=_lowerCamelCase ,width=_lowerCamelCase ,num_inference_steps=_lowerCamelCase ,guidance_scale=_lowerCamelCase ,negative_prompt=_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,latents=_lowerCamelCase ,output_type=_lowerCamelCase ,return_dict=_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=_lowerCamelCase ,**_lowerCamelCase ,)
# Get first result from Stable Diffusion Checkpoint v1.3
__lowercase = self.textaimg_sda_a(
prompt=_lowerCamelCase ,height=_lowerCamelCase ,width=_lowerCamelCase ,num_inference_steps=_lowerCamelCase ,guidance_scale=_lowerCamelCase ,negative_prompt=_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,latents=_lowerCamelCase ,output_type=_lowerCamelCase ,return_dict=_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=_lowerCamelCase ,**_lowerCamelCase ,)
# Get first result from Stable Diffusion Checkpoint v1.4
__lowercase = self.textaimg_sda_a(
prompt=_lowerCamelCase ,height=_lowerCamelCase ,width=_lowerCamelCase ,num_inference_steps=_lowerCamelCase ,guidance_scale=_lowerCamelCase ,negative_prompt=_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase ,eta=_lowerCamelCase ,generator=_lowerCamelCase ,latents=_lowerCamelCase ,output_type=_lowerCamelCase ,return_dict=_lowerCamelCase ,callback=_lowerCamelCase ,callback_steps=_lowerCamelCase ,**_lowerCamelCase ,)
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
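# Hypothetical usage sketch for the comparison pipeline above. Only the four
# checkpoint ids at the top of the file come from the source; the custom
# pipeline id below is an assumption:
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="stable_diffusion_comparison",  # assumed id
#   )
#   pipe.enable_attention_slicing()
#   output = pipe("a photo of an astronaut riding a horse")
#   # output.images holds one image per checkpoint, v1.1 through v1.4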
| 502
| 1
|
'''simple docstring'''
class A :
'''simple docstring'''
def __init__(self ) -> Optional[Any]:
__UpperCamelCase : int = 0
__UpperCamelCase : Any = 0
__UpperCamelCase : Union[str, Any] = {}
def a_ (self , _UpperCAmelCase ) -> int:
if vertex not in self.adjacency:
__UpperCamelCase : Any = {}
self.num_vertices += 1
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
__UpperCamelCase : Optional[int] = weight
__UpperCamelCase : int = weight
def a_ (self ) -> List[str]:
__UpperCamelCase : List[str] = self.get_edges()
for edge in edges:
__UpperCamelCase : Tuple = edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
__UpperCamelCase : Optional[int] = list(edges[i] )
        edges.sort(key=lambda edge: edge[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__UpperCamelCase : Optional[Any] = edges[i][2] + 1
for edge in edges:
__UpperCamelCase : Any = edge
__UpperCamelCase : Any = weight
__UpperCamelCase : Optional[Any] = weight
def __str__(self ) -> Optional[int]:
__UpperCamelCase : Any = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
__UpperCamelCase : str = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n" )
def a_ (self ) -> Tuple:
__UpperCamelCase : str = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a_ (self ) -> str:
return self.adjacency.keys()
@staticmethod
def a_ (_UpperCAmelCase=None , _UpperCAmelCase=None ) -> Any:
__UpperCamelCase : str = Graph()
if vertices is None:
__UpperCamelCase : List[Any] = []
if edges is None:
__UpperCamelCase : Optional[int] = []
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class A :
'''simple docstring'''
def __init__(self ) -> Optional[Any]:
__UpperCamelCase : int = {}
__UpperCamelCase : List[str] = {}
def __len__(self ) -> Optional[int]:
return len(self.parent )
def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
if item in self.parent:
return self.find(_a )
__UpperCamelCase : Union[str, Any] = item
__UpperCamelCase : Any = 0
return item
def a_ (self , _UpperCAmelCase ) -> Tuple:
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
__UpperCamelCase : Tuple = self.find(self.parent[item] )
return self.parent[item]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
__UpperCamelCase : int = self.find(_a )
__UpperCamelCase : Any = self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
__UpperCamelCase : Optional[int] = roota
return roota
if self.rank[roota] < self.rank[roota]:
__UpperCamelCase : Optional[int] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
__UpperCamelCase : Optional[int] = roota
return roota
return None
@staticmethod
def a_ (_UpperCAmelCase ) -> int:
__UpperCamelCase : List[Any] = graph.num_vertices
__UpperCamelCase : Optional[int] = Graph.UnionFind()
__UpperCamelCase : List[str] = []
while num_components > 1:
__UpperCamelCase : Union[str, Any] = {}
for vertex in graph.get_vertices():
__UpperCamelCase : List[Any] = -1
__UpperCamelCase : List[str] = graph.get_edges()
for edge in edges:
__UpperCamelCase : Any = edge
edges.remove((tail, head, weight) )
for edge in edges:
__UpperCamelCase : int = edge
__UpperCamelCase : Tuple = union_find.find(_a )
__UpperCamelCase : str = union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__UpperCamelCase : Optional[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__UpperCamelCase : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
__UpperCamelCase : Union[str, Any] = cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
__UpperCamelCase : Any = num_components - 1
__UpperCamelCase : List[str] = Graph.build(edges=_a )
return mst
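# Self-contained sketch of the union-find logic the class above relies on
# (path compression plus union by rank; the names are mine, since the
# scrambled class names make the original hard to call directly):
class _DisjointSetSketch:
    def __init__(self):
        self.parent, self.rank = {}, {}

    def find(self, item):
        if item not in self.parent:
            self.parent[item], self.rank[item] = item, 0
        if self.parent[item] != item:
            self.parent[item] = self.find(self.parent[item])  # path compression
        return self.parent[item]

    def union(self, a, b):
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return root_a
        if self.rank[root_a] < self.rank[root_b]:
            root_a, root_b = root_b, root_a  # keep the deeper tree on top
        self.parent[root_b] = root_a
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
        return root_a

_ds = _DisjointSetSketch()
_ds.union(1, 2)
_ds.union(2, 3)
assert _ds.find(1) == _ds.find(3)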
| 710
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.02 , use_labels=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config(self ):
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self ):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self , config , input_ids , input_mask , token_labels , **kwargs ):
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs(self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
    def create_and_check_for_causal_lm(self , config , input_ids , input_mask , token_labels , *args , **kwargs ):
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
    def setUp(self ):
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_bert(self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
    def test_model_as_decoder(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_decoder_model_past_with_large_inputs(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask(self ):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_causal_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        self.assertIsNotNone(model )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head_absolute_embedding(self ):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference(self ):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 50358] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 399
| 0
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
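# Quick sanity check of the memoised search above: under a limit of 10 the
# longest Collatz chain starts at 9 (9 -> 28 -> 14 -> 7 -> ... -> 1, twenty
# terms in total), so solution(10) returns 9.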
| 575
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 575
| 1
|
import os
def solution():
    '''simple docstring'''
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('''"''' , '''''' ).split(''',''' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
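# Worked example of the scoring rule above: in the real Euler 22 input, COLIN
# is the 938th name after sorting and scores 3 + 15 + 12 + 9 + 14 = 53, so it
# contributes 938 * 53 = 49714 to the total.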
| 720
|
import os
def solution(filename: str = "input.txt") -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
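# Sanity check for the three-pass DP above, using the 5 x 5 example matrix
# from Euler 82: the minimal left-column-to-right-column path sum is 994
# (201 -> 96 -> 342 -> 234 -> 103 -> 18, moving up, down and right only).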
| 633
| 0
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
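# Hedged usage sketch (the coordinates are approximate):
#
#   san_francisco = (37.774856, -122.424227)
#   yosemite = (37.864742, -119.537521)
#   lamberts_ellipsoidal_distance(*san_francisco, *yosemite)
#   # roughly 2.54e5 metres, close to the haversine figure, since the
#   # flattening correction is small at this scale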
| 393
|
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    '''simple docstring'''
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
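# Example (using the reconstruction above): interpolating the linear data
# y = x + 5 at x0 = 5; any polynomial through these points reproduces the line.
print(neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0])  # 10.0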
| 393
| 1
|
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'{solution() = }')
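# Worked check of the counting rule above (Euler 86's own example): a
# 6 x 5 x 3 cuboid has shortest surface path sqrt(6**2 + (5 + 3)**2) = 10,
# an integer, so it is one of the cuboids tallied by the is_integer() test.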
| 471
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
lowerCAmelCase__ = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 471
| 1
|
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
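A quick self-contained cross-check of the 6k ± 1 trial division against the naive definition (the range is chosen arbitrarily):

def is_prime_naive(n: int) -> bool:
    # Reference implementation: trial division by every candidate factor.
    return n > 1 and all(n % d for d in range(2, n))

assert all(is_prime(n) == is_prime_naive(n) for n in range(2, 500))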
| 448
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return set(pairs)
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # The four special tokens occupy the first vocabulary slots.
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the adjacent pair with the lowest merge rank first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"""Incorrect encoding detected in {f}, please rebuild the dataset""")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
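For a feel of what drives the merge loop in `bpe`, a toy call to `get_pairs` (no vocabulary files needed):

# Adjacent symbol pairs of a word ending in the "</w>" marker; the lowest-ranked
# pair in `bpe_ranks` is merged first.
print(get_pairs(("l", "o", "w", "e", "r</w>")))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}  (set order may vary)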
| 448
| 1
|
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')
    if int_num == 0:
        return 0  # guard: the digit-building loop below never runs for zero
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(('-' + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
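A few spot checks that follow directly from the definition:

assert hex_to_bin("AC") == 10101100   # 0xAC == 172 == 0b10101100
assert hex_to_bin("-fF") == -11111111  # 0xFF == 255 == 0b11111111
assert hex_to_bin("0") == 0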
| 409
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self) -> None:
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible(self) -> None:
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible(self) -> None:
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible(self) -> None:
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self) -> None:
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self) -> None:
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant(self) -> None:
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant_partial(self) -> None:
        # pass variant but use the non-variant filenames
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self) -> None:
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant(self) -> None:
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant_partial(self) -> None:
        # pass variant but use the non-variant filenames
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_not_compatible_variant(self) -> None:
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
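To make the rule under test explicit, here is a simplified sketch of the compatibility check (ours, not the `diffusers` implementation; it ignores the `variant` handling exercised above):

import os

def sketch_is_safetensors_compatible(filenames: list[str]) -> bool:
    # Simplified rule: every ".bin" weight needs a ".safetensors" counterpart;
    # transformers components name theirs "model.safetensors" instead of "pytorch_model.safetensors".
    available = set(filenames)
    for f in filenames:
        if not f.endswith(".bin"):
            continue
        folder, name = os.path.split(f[: -len(".bin")])
        if name == "pytorch_model":
            name = "model"
        if os.path.join(folder, name + ".safetensors") not in available:
            return False
    return True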
| 409
| 1
|
'''simple docstring'''
from __future__ import annotations
lowercase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f'->{target_vertex}'
if __name__ == "__main__":
    g = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
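A worked trace of the demo above, derived from the BFS tree rooted at G (note that the last call raises, since 'Foo' is not a vertex):

# g.shortest_path("D")   -> "G->C->A->B->D"
# g.shortest_path("G")   -> "G"
# g.shortest_path("Foo") -> ValueError: No path from vertex: G to vertex: Foo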
| 211
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 211
| 1
|
'''simple docstring'''
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        message = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(message)
    if number < 1:
        message = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F"ValueError: there is no {number}th Proth number")
continue
print(F"The {number}th Proth number: {value}")
| 704
|
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self) -> None:
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer('This is me', return_tensors='pt')
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self) -> None:
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 528
| 0
|
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n    `scores`: List of scores.\n\nExamples:\n\n    >>> comet_metric = datasets.load_metric(\'comet\')\n    >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 506
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 506
| 1
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method='''nearest''',
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1E-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1E-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding='''VALID''', dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
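A minimal smoke test of the residual block (a sketch under the assumption of NHWC inputs, as the resize in FlaxUpsample2D implies):

block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
rng = jax.random.PRNGKey(0)
x = jnp.ones((1, 8, 8, 32))   # dummy NHWC feature map
temb = jnp.ones((1, 128))     # dummy timestep embedding
params = block.init(rng, x, temb)
y = block.apply(params, x, temb)
assert y.shape == x.shape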
| 712
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError('''CUDA out of memory.''')
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function('''hello''')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, '''hello'''])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, '''hello''', '''world''')
        self.assertIn('''Batch size was passed into `f`''', cm.exception.args[0])
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''', cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('''Oops, we had an error!''')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''', cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
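Outside the test suite the decorator is used the same way in training scripts; a minimal sketch (the body is a placeholder):

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    # On a CUDA OOM, the decorator retries with the batch size halved: 64 -> 32 -> 16 -> ...
    print(f"trying batch size {batch_size}")

train()  # called without arguments; the decorator injects batch_size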
| 561
| 0
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(f"""{add(first, second) = }""")
| 49
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    '''simple docstring'''

    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    '''simple docstring'''

    def __init__(self, initial_block_size=8, capacity_factor=0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind, key, val) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key, val) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ''' ,'''.join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item)
        return F"""HashMap({val_string})"""
| 90
| 0
|
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.", ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 701
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 281
| 0
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
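A small worked example (counts are random, but with the 0.9 self-loop on 'a' the visits skew roughly 5:1 toward 'a'):

transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
print(get_transitions("a", transitions, 1000))
# e.g. Counter({'a': 834, 'b': 168}) -- one extra count per node comes from the initial Counter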
| 12
|
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')

METRIC_CONVERSION = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0454, 264.172),
'cubicyard': from_to(0.7_6455, 1.3_0795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.0_0023_6588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    '''simple docstring'''
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 566
| 0
|
from math import ceil
def solution(n: int = 1001) -> int:
    """simple docstring"""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 365
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 365
| 1
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default) -> int:
    """Returns the first non-negative int found under `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False) -> bool:
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no") -> str:
    value = os.environ.get(key, str(default))
    return value
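# Example (a sketch; the environment variable names are illustrative):
#
#     import os
#     os.environ["MY_TIMEOUT"] = "30"
#     get_int_from_env(["MY_TIMEOUT", "FALLBACK_TIMEOUT"], default=10)  # -> 30
#     parse_flag_from_env("MY_FLAG")  # -> False while MY_FLAG is unset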
| 46
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the height and width expected after the image processor
        resizes the input to size["shortest_edge"], keeping the aspect ratio."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 126
| 0
|
def interpolation_search(sorted_collection, item):
    """Searches `item` in an ascending `sorted_collection` by estimating its
    position from the value distribution; returns the index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 559
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Loads a fairseq/metaseq checkpoint and reshapes its state dict for HF OPT."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
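# Example invocation (sketch; the script filename and paths are placeholders):
#
#     python convert_opt_checkpoint.py \
#         --fairseq_path /path/to/restored.pt \
#         --pytorch_dump_folder_path ./opt-hf \
#         --hf_config facebook/opt-125m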
| 559
| 1
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads the dataset metadata from the YAML block of a dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads the dataset metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
A : int = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
A : List[str] = ap.parse_args()
A : Tuple = Path(args.readme_filepath)
A : List[str] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
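# Example (sketch): for a README.md that starts with
#
#     ---
#     language: en
#     license: mit
#     ---
#
# `DatasetMetadata.from_readme(Path("README.md"))` returns a dict-like object
# {'language': 'en', 'license': 'mit'} that round-trips back to the same YAML
# block via `to_readme`.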
| 15
|
# Functions to print the upper and lower halves of a diamond (pyramid)


def floyd(n):
    """Prints the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Prints the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Prints the complete diamond, or a notice for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 441
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual question answering pipeline: answers a question about an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # dict / list-of-dict / generator inputs are forwarded as-is
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 705
|
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci number with `n` digits
    (Project Euler problem 25)."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
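# Sanity check (sketch): fibonacci(12) == 144 is the first Fibonacci number
# with three digits, so fibonacci_digits_index(3) == 12 and solution(3) == 12.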
| 278
| 0
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .zstd file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
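# Usage sketch (path is a placeholder; assumes these filesystems have been
# registered with fsspec, which the surrounding library does on import):
#
#     import fsspec
#     with fsspec.open("gzip://data.txt::/tmp/data.txt.gz") as f:
#         print(f.read())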
| 57
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_pretrained_model_lists(self):
        # no pretrained model list to test here
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
| 610
| 0
|
'''simple docstring'''
# Imports
import numpy as np


class IndexCalculation:
    """Calculates vegetation indices (NDVI and friends) from band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
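# Usage sketch with tiny single-pixel bands (values are illustrative):
#
#     red = np.array([[26.0]]); green = np.array([[12.0]]); blue = np.array([[9.0]])
#     red_edge = np.array([[30.0]]); nir = np.array([[38.0]])
#     cl = IndexCalculation(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
#     cl.calculation("NDVI")  # -> [[0.1875]] since (38 - 26) / (38 + 26) = 0.1875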
| 704
|
def min_path_sum(grid: list) -> int:
    """Finds the minimum-cost path from the top-left to the bottom-right of a
    grid, moving only right or down, via in-place dynamic programming."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]

    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
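# Worked example (sketch):
#
#     min_path_sum([[1, 3, 1],
#                   [1, 5, 1],
#                   [4, 2, 1]])  # -> 7, along the path 1 -> 3 -> 1 -> 1 -> 1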
| 49
| 0
|
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Computes the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
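# Worked example (sketch): for L = 10 mH and C = 5 uF,
#
#     resonant_frequency(10e-3, 5e-6)
#     # -> ("Resonant frequency", 711.76...) since 1/(2*pi*sqrt(5e-8)) ~ 711.76 Hz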
| 66
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 66
| 1
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Logs commit info of the current repo into `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handles single and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the random seed for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
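# The multi-GPU branch above expects these environment variables to be set by
# the launcher (a sketch; the script name is a placeholder and the values are
# per-process):
#
#     WORLD_SIZE=8 N_GPU_NODE=4 N_NODES=2 NODE_RANK=0 RANK=3 python train.py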
| 70
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70
| 1
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 509
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__lowerCAmelCase : Dict = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
__lowerCAmelCase : Tuple = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
__lowerCAmelCase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # letters sharing a frequency are ordered by reverse ETAOIN position
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
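# Usage sketch (illustrative, not part of the original module):
#     score = english_freq_match_score("Sample English text to score")
#     # score ranges from 0 to 12; higher means the message's six most common
#     # and six least common letters line up with typical English (ETAOIN).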
| 509
| 1
|
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
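# Worked example (a sketch): for num = 10, factorial(10) = 3628800 and
# split_and_add(3628800) = 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.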
| 689
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
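# Note (editorial sketch): with array = [1, 2, 5] and target = 5 there are
# 9 ordered combinations (1+1+1+1+1, 1+2+2, 5, ...), so all three variants
# above agree on 9; the bottom-up version runs in O(n * target) time versus
# the exponential plain recursion.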
| 689
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : List[Any] = logging.get_logger(__name__)
__a : List[Any] = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _UpperCamelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''audio-spectrogram-transformer'''

    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=10_24 , num_mel_bins=1_28 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
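# Usage sketch (illustrative): attributes mirror the __init__ arguments above.
#     config = _UpperCamelCase(frequency_stride=10, time_stride=10, max_length=1024)
#     print(config.num_mel_bins)  # 128 (the default)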
| 534
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__a : Tuple = logging.get_logger(__name__)
def get_maskformer_config( model_name ):
    """simple docstring"""
    backbone_config = SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    return config
def create_rename_keys( config ):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    """simple docstring"""
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    """simple docstring"""
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , '''rb''' ) as f:
        data = pickle.load(f )
    state_dict = data['''model''']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if '''ade''' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors='''pt''' )
    outputs = model(**inputs )
    print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(F"nielsr/{model_name}" )
        image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
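# Usage sketch (paths illustrative; the script's own filename is assumed here):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./converted --push_to_hub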
| 534
| 1
|
import os
def solution( ):
    '''simple docstring'''
    file_path = os.path.join(os.path.dirname(__file__ ), """num.txt""" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 710
|
import math
import random
def sigmoid_function( value , deriv = False ):
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected , number_propagations ):
    '''simple docstring'''
    # Random weight
    weight = float(2 * (random.randint(1, 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1 , deriv=True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
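# Behavioral sketch: the loop nudges the single weight until
# sigmoid_function(INITIAL_VALUE * weight) * 100 approaches `expected`, so
# e.g. forward_propagation(32, 450_000) should land near 32; a large
# propagation count is needed because each step is scaled by INITIAL_VALUE.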
| 76
| 0
|
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__( self , task_performed , total ):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1

    def count_ways_until( self , mask , task_no ):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways( self , task_performed ):
        # Store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
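# Explanation sketch: task_performed[i] lists the tasks person i can do, so
# person 0 can take tasks {1, 3, 4}, person 1 {1, 2, 5}, person 2 {3, 4}.
# The bitmask tracks which persons already hold a task; the printed answer is
# the number of distinct one-task-per-person assignments (10 for this input).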
| 66
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase_ ( PretrainedConfig):
    model_type = '''gpt_neox'''

    def __init__( self , vocab_size=5_0432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=2_4576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=1_0000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )

    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
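# Validation sketch: rope_scaling must look like {"type": "linear", "factor": 2.0}.
#     config = UpperCAmelCase_(rope_scaling={"type": "linear", "factor": 2.0})  # ok
#     config = UpperCAmelCase_(rope_scaling={"type": "cubic", "factor": 2.0})   # raises ValueError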
| 420
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__( self , image_processor , tokenizer ) ->None:
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) ->BatchEncoding:
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        return ["input_ids", "attention_mask", "pixel_values"]
| 701
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase__ : Optional[int] = logging.get_logger(__name__)
def get_resize_output_image_size( input_image: np.ndarray , output_size: Union[int, Iterable[int]] , keep_aspect_ratio: bool , multiple: int ):
    '''simple docstring'''
    def constraint_to_multiple_of( val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
class lowerCamelCase ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']

    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , keep_aspect_ratio: bool = False , ensure_multiple_of: int = 1 , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) ->None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , keep_aspect_ratio: bool = False , ensure_multiple_of: int = 1 , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: int = None , keep_aspect_ratio: bool = None , ensure_multiple_of: int = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) ->PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes: List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
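# Usage sketch (illustrative): resizing keeps each side a multiple of
# `ensure_multiple_of`, which the patch-based backbone requires.
#     processor = lowerCamelCase(size={"height": 384, "width": 384}, ensure_multiple_of=32)
#     batch = processor.preprocess(pil_image, return_tensors="pt")
#     # batch["pixel_values"].shape[-2:] will both be multiples of 32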
| 43
| 0
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer( pl.LightningModule ):
    def __init__( self , hparams: argparse.Namespace , num_labels=None , mode="base" , config=None , tokenizer=None , model=None , **config_kwargs , ):
        '''simple docstring'''
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), f'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config , p , getattr(self.hparams , p ) )
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model
    def load_hf_checkpoint( self , *args , **kwargs ):
        '''simple docstring'''
        self.model = self.model_type.from_pretrained(*args , **kwargs )

    def get_lr_scheduler( self ):
        '''simple docstring'''
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler
    def configure_optimizers( self ):
        '''simple docstring'''
        model = self.model
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
                'weight_decay': self.hparams.weight_decay,
            },
            {
                'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                'weight_decay': 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step( self , batch , batch_nb ):
        '''simple docstring'''
        return self.validation_step(batch , batch_nb )

    def test_epoch_end( self , outputs ):
        '''simple docstring'''
        return self.validation_end(outputs )

    def total_steps( self ) -> int:
        '''simple docstring'''
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup( self , stage ):
        '''simple docstring'''
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )

    def get_dataloader( self , type_path: str , batch_size: int , shuffle: bool = False ):
        '''simple docstring'''
        raise NotImplementedError('You must implement this for your task' )

    def train_dataloader( self ):
        '''simple docstring'''
        return self.train_loader

    def val_dataloader( self ):
        '''simple docstring'''
        return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=False )

    def test_dataloader( self ):
        '''simple docstring'''
        return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=False )

    def _feature_file( self , mode ):
        '''simple docstring'''
        return os.path.join(
            self.hparams.data_dir , 'cached_{}_{}_{}'.format(
                mode , list(filter(None , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint( self , checkpoint: Dict[str, Any] ) -> None:
        '''simple docstring'''
        save_path = self.output_dir.joinpath('best_tfmr' )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
    @staticmethod
    def add_model_specific_args( parser , root_dir ):
        '''simple docstring'''
        parser.add_argument(
            '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
        parser.add_argument(
            '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name' )
        parser.add_argument(
            '--tokenizer_name' , default=None , type=str , help='Pretrained tokenizer name or path if not the same as model_name' , )
        parser.add_argument(
            '--cache_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'cache' ) , type=str , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
        parser.add_argument(
            '--encoder_layerdrop' , type=float , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--decoder_layerdrop' , type=float , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--dropout' , type=float , help='Dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--attention_dropout' , type=float , help='Attention dropout probability (Optional). Goes into model.config' , )
        parser.add_argument('--learning_rate' , default=5e-5 , type=float , help='The initial learning rate for Adam.' )
        parser.add_argument(
            '--lr_scheduler' , default='linear' , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help='Learning rate scheduler' , )
        parser.add_argument('--weight_decay' , default=0.0 , type=float , help='Weight decay if we apply some.' )
        parser.add_argument('--adam_epsilon' , default=1e-8 , type=float , help='Epsilon for Adam optimizer.' )
        parser.add_argument('--warmup_steps' , default=0 , type=int , help='Linear warmup over warmup_steps.' )
        parser.add_argument('--num_workers' , default=4 , type=int , help='kwarg passed to DataLoader' )
        parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=int )
        parser.add_argument('--train_batch_size' , default=3_2 , type=int )
        parser.add_argument('--eval_batch_size' , default=3_2 , type=int )
        parser.add_argument('--adafactor' , action='store_true' )
class InitCallback( pl.Callback ):
    def on_sanity_check_start( self , trainer , pl_module ):
        '''simple docstring'''
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback( pl.Callback ):
    def on_after_backward( self , trainer , pl_module ):
        '''simple docstring'''
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )


class LoggingCallback( pl.Callback ):
    def on_batch_end( self , trainer , pl_module ):
        '''simple docstring'''
        lr_scheduler = trainer.lr_schedulers[0]['scheduler']
        lrs = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )

    def on_validation_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        '''simple docstring'''
        rank_zero_info('***** Validation results *****' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )

    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        '''simple docstring'''
        rank_zero_info('***** Test results *****' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
        with open(output_test_results_file , 'w' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )
                    writer.write('{} = {}\n'.format(key , str(metrics[key] ) ) )
def add_generic_args( parser , root_dir ) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '--output_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'model_checkpoints' ) , type=str , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O2' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=int )
    parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=float , help='Max gradient norm' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
    parser.add_argument(
        '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--seed' , type=int , default=4_2 , help='random seed for initialization' )
    parser.add_argument(
        '--data_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'dummy-train-data' ) , type=str , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def generic_train( model: BaseTransformer , args: argparse.Namespace , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['precision'] = 1_6
    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'
    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print('RAG modeling tests with new set functions successfully executed!' )
    return trainer
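# Usage sketch (MyTaskModule is a hypothetical subclass): implement
# get_dataloader() on a BaseTransformer subclass, then wire up argparse:
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     MyTaskModule.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     trainer = generic_train(MyTaskModule(args), args)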
| 47
|
'''simple docstring'''
def one_pence() -> int:
    '''simple docstring'''
    return 1


def two_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 1_0) + five_pence(x)


def twenty_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 2_0) + ten_pence(x)


def fifty_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 5_0) + twenty_pence(x)


def one_pound(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 1_0_0) + fifty_pence(x)


def two_pound(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 2_0_0) + one_pound(x)


def solution(x: int = 2_0_0) -> int:
    '''simple docstring'''
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
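# Sketch of the recursion: each function counts ways to make `x` pence using
# its own coin plus all smaller ones, so solution(200) counts the ways to make
# 2 pounds from {1, 2, 5, 10, 20, 50, 100, 200} pence coins: the well-known
# Project Euler 31 answer, 73682.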
| 72
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
        module_spec=__spec__,
    )
| 710
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset ,expected_features ) -> None:
    assert isinstance(dataset ,Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory ,parquet_path ,tmp_path ) -> None:
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset ,expected_features )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" ,[str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" ,[
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] ,)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
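# Illustrative round trip (assumes a `Dataset` instance `ds` is in scope):
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   reloaded = ParquetDatasetReader("out.parquet").read()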
| 615
| 0
|
'''simple docstring'''
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
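# Implements `accelerate env`: gathers version, platform, and hardware details
# plus the current accelerate config so users can paste them into bug reports.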
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 620
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It exists only so that, for now, `from diffusers.pipelines import DiffusionPipeline` keeps working
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 135
| 0
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S')
    return timestamp
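# Illustrative usage (assumes a torch.nn.Module `model` and a PIL image `img`):
#   freeze_params(model); model.to(get_device()); show_image(img)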
| 427
|
'''simple docstring'''
import argparse
import copy
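# Tabu search for travelling-salesman-style instances. The input file is an
# edge list with one "node_a node_b distance" triple per line.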
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
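# A neighbour is the current tour with two interior nodes swapped; each
# candidate list carries its total distance appended as the final element.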
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
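# Core loop: move to the best non-tabu neighbour each iteration (even if it is
# worse) and keep recent swaps in a bounded tabu list to escape local optima.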
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

            if len(tabu_list) >= size:
                tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 427
| 1
|