| code (string, 82-54.1k chars) | code_codestyle (int64, 0-699) | style_context (string, 111-35.6k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of ``torch.distributed``. Only the main worker loads the index;
    queries are gathered to and results scattered from rank 0 over a dedicated ``gloo`` process group.
    """
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        """Initialize the index and, if running distributed, a dedicated retrieval process group."""
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """
        Retrieve ``n_docs`` documents per query. Queries from all workers are gathered on the main
        worker, which performs the index lookup and scatters the results back.
        """
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
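# Minimal sketch (an illustration, not part of the original module) of the gather/scatter
# round-trip used by retrieve(): every rank sends its query batch to rank 0, rank 0 performs
# the lookup, and each rank receives its own slice of the results back.
def _demo_gather_scatter(process_group, queries: torch.Tensor, n_docs: int) -> torch.Tensor:
    world_size = dist.get_world_size(group=process_group)
    is_main = dist.get_rank(group=process_group) == 0
    # rank 0 allocates one buffer per rank; other ranks pass None
    gather_list = [torch.empty_like(queries) for _ in range(world_size)] if is_main else None
    dist.gather(queries, dst=0, gather_list=gather_list, group=process_group)
    # rank 0 would run the index lookup on torch.cat(gather_list) here and chunk the
    # resulting doc ids back into one tensor per rank
    result = torch.empty(queries.shape[0], n_docs, dtype=torch.int64)
    scatter_list = [torch.zeros_like(result) for _ in range(world_size)] if is_main else []
    dist.scatter(result, src=0, scatter_list=scatter_list, group=process_group)
    return result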
| 80 | '''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the model from the given config, or the default config if none is given
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save the PyTorch model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
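# Illustrative invocation of the conversion script above (the paths are placeholders,
# not taken from the original file):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /path/to/output
#
# The output folder then contains the weights file (WEIGHTS_NAME) and config.json
# (CONFIG_NAME), loadable with GPT2Model.from_pretrained("/path/to/output").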
| 660 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 81 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark the function with a single key code so it can be picked up by the register."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys: List[str]):
    """Mark the function with several key codes so it can be picked up by the register."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    """Metaclass that collects the marked key handlers of a class into `key_handler`."""
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """Reads one character and dispatches to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Applies the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
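# Usage sketch (an assumption for illustration; not part of the original module):
# methods decorated with @mark are collected into `key_handler` by the KeyHandler
# metaclass, and `register` applies that metaclass to a plain class.
@register
class DemoMenu:
    @mark(KEYMAP["enter"])  # assumes "enter" is a key in this KEYMAP
    def select(cls):
        return "selected"
# DemoMenu.handle_input(DemoMenu) would now block on get_character() and return
# "selected" when the Enter key is pressed.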
| 660 | 0 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    # implemented only because Lightning requires a forward; it is unused here
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
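# Illustrative invocation (the model id and paths are placeholders, not from the original file):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path /path/to/lightning.ckpt \
#       --pytorch_dump_folder_path /path/to/output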
| 82 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__snake_case : Optional[Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = config.context_length + max(config.lags_sequence )
UpperCAmelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length] )
UpperCAmelCase_ = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
UpperCAmelCase_ = model(**UpperCamelCase__ )
UpperCAmelCase_ = outputs.encoder_last_hidden_state
UpperCAmelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = model.create_network_inputs(**UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
UpperCAmelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
UpperCAmelCase_ = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
UpperCAmelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
UpperCAmelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowercase_ ( _A , _A , unittest.TestCase ):
a_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a_ = (AutoformerForPrediction,) if is_torch_available() else ()
a_ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
    @unittest.skip(reason="Model has no token embeddings")
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
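# Minimal forecasting sketch (illustrative; it mirrors the integration test above rather
# than prescribing any API beyond it):
#
#   model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#   batch = prepare_batch("val-batch.pt")
#   with torch.no_grad():
#       outputs = model.generate(
#           static_categorical_features=batch["static_categorical_features"],
#           past_time_features=batch["past_time_features"],
#           past_values=batch["past_values"],
#           future_time_features=batch["future_time_features"],
#           past_observed_mask=batch["past_observed_mask"],
#       )
#   point_forecast = outputs.sequences.mean(dim=1)  # average over the parallel samples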
| 660 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
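# Quick sanity check (illustrative, not part of the original utilities): the QA metrics
# above normalize case, punctuation and articles before comparing strings.
if __name__ == "__main__":
    assert exact_match_score("The Answer!", "the answer")
    print(calculate_exact_match(["a cat"], ["A cat."]))  # {'em': 1.0}
    print(f1_score("new york city", "york city"))  # token-level F1: 0.8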
| 83 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__snake_case : Dict = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
@property
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(UpperCamelCase__ , clean=self.do_clean_text )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = "".join(UpperCamelCase__ ).strip()
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ = 0
if os.path.isdir(UpperCamelCase__ ):
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(",".join(UpperCamelCase__ ) + "\n" )
index += 1
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , UpperCamelCase__ )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace("\r\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\r" , "<BR>" )
UpperCAmelCase_ = text.replace("\t" , "<TAB>" )
UpperCAmelCase_ = text.replace("—" , "ー" )
UpperCAmelCase_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase_ = text.replace(UpperCamelCase__ , UpperCamelCase__ )
if clean:
UpperCAmelCase_ = self.clean_text(UpperCamelCase__ )
def check_simbol(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 2:
UpperCAmelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
):
return True
return False
def checkuae(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 3:
UpperCAmelCase_ = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
return True
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while pos < len(UpperCamelCase__ ):
UpperCAmelCase_ = min(len(UpperCamelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase_ = [] # (token_id, token, pos)
for e in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
UpperCAmelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase__ ) > 2:
UpperCAmelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase__ ) > 0:
# the smallest token_id is adopted
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[0] )[0]
result.append(UpperCamelCase__ )
UpperCAmelCase_ = e
else:
UpperCAmelCase_ = pos + 1
UpperCAmelCase_ = text[pos:end]
if check_simbol(UpperCamelCase__ ):
result.append("<KIGOU>" )
elif checkuae(UpperCamelCase__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase_ = end
return result
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__="\n" ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(UpperCamelCase__ )
return text
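# Usage sketch (an assumption for illustration; the checkpoint id comes from the
# pretrained-vocab map defined above):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("絵文字や URL を含む日本語テキスト")["input_ids"]
#   text = tokenizer.decode(ids)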
| 660 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us how often (every how many layers) a sparse encoder layer is inserted.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us how often (every how many layers) a sparse decoder layer is inserted.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
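# Worked example (illustrative, not part of the original file) of the sparse-layer
# spacing computed in __init__: with the defaults num_layers=12 and
# num_sparse_encoder_layers=3, encoder_sparse_step = 12 // 3 = 4, i.e. every 4th
# encoder layer is a mixture-of-experts (sparse) layer; the decoder is spaced the same way.
if __name__ == "__main__":
    config = SwitchTransformersConfig()
    print(config.encoder_sparse_step, config.decoder_sparse_step)  # 4 4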
| 84 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 85 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
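# Usage sketch (illustrative): this packaged builder is what backs loading parquet files
# through the public API, including the `columns` projection handled above:
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "path/to/*.parquet"}, columns=["text"])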
| 660 | 0 |
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Power-of-ten exponent of each unit relative to the meter
__a :List[str] = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = from_type.lower().strip("s" )
A_ = to_type.lower().strip("s" )
A_ = UNIT_SYMBOL.get(__UpperCamelCase ,__UpperCamelCase )
A_ = UNIT_SYMBOL.get(__UpperCamelCase ,__UpperCamelCase )
if from_sanitized not in METRIC_CONVERSION:
A_ = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {", ".join(__UpperCamelCase )}'''
)
raise ValueError(__UpperCamelCase )
if to_sanitized not in METRIC_CONVERSION:
A_ = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {", ".join(__UpperCamelCase )}'''
)
raise ValueError(__UpperCamelCase )
A_ = METRIC_CONVERSION[from_sanitized]
A_ = METRIC_CONVERSION[to_sanitized]
A_ = 1
if from_exponent > to_exponent:
A_ = from_exponent - to_exponent
else:
A_ = -(to_exponent - from_exponent)
return value * pow(10 ,__UpperCamelCase )
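# Worked example of the exponent arithmetic above (the converter's def name is
# obfuscated; `length_conversion` is assumed for illustration):
# 4 km -> m: exponents are 3 (km) and 0 (m), factor 10 ** (3 - 0), so 4 * 1000 = 4000.0
# 1 m -> km: exponent difference is -(3 - 0) = -3, so 1 * 10 ** -3 = 0.001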
if __name__ == "__main__":
from doctest import testmod
testmod() | 86 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''spiece.model'''}
__snake_case : Dict = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__snake_case : Tuple = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
"""simple docstring"""
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
UpperCAmelCase_ = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCAmelCase_ = "<|endoftext|>" if eos_token is None else eos_token
UpperCAmelCase_ = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCAmelCase_ = unk_token if pad_token is None else pad_token
UpperCAmelCase_ = eos_token if bos_token is None else bos_token
else:
UpperCAmelCase_ = "<pad>" if pad_token is None else pad_token
UpperCAmelCase_ = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# Used for whitespace normalization in input texts
# fmt: off
UpperCAmelCase_ = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
UpperCAmelCase_ = re.compile(
F"""[{"".join(map(UpperCamelCase__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.non_printing_characters_re.sub("" , UpperCamelCase__ )
# Normalize whitespaces
UpperCAmelCase_ = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
UpperCAmelCase_ = unicodedata.normalize("NFC" , UpperCamelCase__ )
return text
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__ ) -> str:
"""simple docstring"""
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
UpperCAmelCase_ = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string
def lowerCamelCase_ ( self ) -> Dict[str, int]:
"""simple docstring"""
UpperCAmelCase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
else:
UpperCAmelCase_ = [self.preprocess_text(UpperCamelCase__ ) for t in text]
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
if return_tensors is True or return_tensors == "pt":
UpperCAmelCase_ = torch.tensor(UpperCamelCase__ )
return token_ids
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.decode(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
UpperCAmelCase_ = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(UpperCamelCase__ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=UpperCamelCase__ )
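# A sketch of the chat prompt the method above builds, assuming the non-7b
# defaults eos_token="<|endoftext|>" and bos_token="<s>": for the turns
# ("Hi" from the user, "Hello!" from the bot) the joined string is
# "<|endoftext|><s>User: Hi<s>Bot: Hello!<s>Bot:"
# which is then run through `self.encode`.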
| 660 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
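# With the lazy module above, importing the package stays cheap: submodules listed
# in `_import_structure` are only loaded when an attribute is first accessed, e.g.
# (illustrative, assuming the usual transformers layout):
# from transformers.models.timesformer import TimesformerConfig # triggers the lazy load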
| 87 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=1_8 , UpperCamelCase__=3_0 , UpperCamelCase__=4_0_0 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 1_8}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
a_ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
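# A minimal usage sketch of the processor exercised above, assuming the public
# facebook/levit-128S checkpoint and its default 224x224 crop:
# from PIL import Image
# from transformers import LevitImageProcessor
# processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
# pixel_values = processor(Image.new("RGB", (480, 360)), return_tensors="pt").pixel_values
# pixel_values.shape # -> torch.Size([1, 3, 224, 224])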
| 660 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( A_ ):
__UpperCAmelCase = (DDPMScheduler,)
def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : Optional[int] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**SCREAMING_SNAKE_CASE)
return config
def UpperCamelCase_ ( self) -> Optional[int]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[int]:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE , beta_end=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[Any]:
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , )
def UpperCamelCase_ ( self) -> Tuple:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config()
_lowerCamelCase : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : List[Any] = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : List[Any] = self.dummy_sample_deter
_lowerCamelCase : int = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : Any = pred_prev_sample
_lowerCamelCase : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE))
_lowerCamelCase : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1e-2
assert abs(result_mean.item() - 0.33_72) < 1e-3
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""")
_lowerCamelCase : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Tuple = self.dummy_sample_deter
_lowerCamelCase : List[Any] = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : Optional[Any] = pred_prev_sample
_lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE))
_lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1e-2
assert abs(result_mean.item() - 0.26_31) < 1e-3
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Any = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE):
if i == len(SCREAMING_SNAKE_CASE) - 1:
_lowerCamelCase : Dict = -1
else:
_lowerCamelCase : int = timesteps[i + 1]
_lowerCamelCase : Union[str, Any] = scheduler.previous_timestep(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Tuple:
_lowerCamelCase : Dict = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = [100, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : str = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = [100, 87, 50, 1, 0]
_lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE)
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE)
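# A self-contained sketch of the reverse-diffusion loop these tests exercise, with
# a zero tensor standing in for the UNet's noise prediction (illustrative only):
def _ddpm_loop_sketch():
    import torch
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample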
| 88 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 100 , ):
UpperCAmelCase_ = x_start
UpperCAmelCase_ = fnc(A_ )
UpperCAmelCase_ = 0.0
for _ in range(A_ ):
# Approximates curve as a sequence of linear lines and sums their length
UpperCAmelCase_ = (x_end - x_start) / steps + xa
UpperCAmelCase_ = fnc(A_ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
UpperCAmelCase_ = xa
UpperCAmelCase_ = fxa
return length
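# Sanity check for the approximation above (the def name is obfuscated; the demo
# below calls it `line_length`): for f(x) = x from 0 to 1 every linear segment lies
# exactly on the curve, so even a single step is exact:
# line_length(lambda x: x, 0.0, 1.0, 1) # -> 1.4142135623730951 == sqrt(2)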
if __name__ == "__main__":
def lowerCamelCase__ ( A_ ):
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
__snake_case : List[Any] = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 660 | 0 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
print(F'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(lowerCamelCase_ ):
print(F'''{i}\t\t{d}''' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
for j in range(lowerCamelCase_ ):
_lowercase , _lowercase , _lowercase : Dict = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
return True
return False
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> list[float]:
_lowercase : Optional[int] = [float('inf' )] * vertex_count
_lowercase : str = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCamelCase_ ):
_lowercase , _lowercase , _lowercase : Any = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
_lowercase : int = distance[u] + w
_lowercase : Dict = check_negative_cycle(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if negative_cycle_exists:
raise Exception('Negative cycle found' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
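# A hedged usage sketch for the relaxation routine above (exposed as `bellman_ford`
# in the interactive block below), on a 3-vertex, 3-edge graph with source 0:
# graph = [
#     {"src": 0, "dst": 1, "weight": 4},
#     {"src": 0, "dst": 2, "weight": 5},
#     {"src": 1, "dst": 2, "weight": -2},
# ]
# bellman_ford(graph, 3, 3, 0) # -> [0.0, 4.0, 2.0]; the -2 edge shortens 0 -> 2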
SCREAMING_SNAKE_CASE : List[str] = int(input("Enter number of vertices: ").strip())
SCREAMING_SNAKE_CASE : Union[str, Any] = int(input("Enter number of edges: ").strip())
SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
SCREAMING_SNAKE_CASE : List[Any] = {"src": src, "dst": dest, "weight": weight}
SCREAMING_SNAKE_CASE : Any = int(input("\nEnter shortest path source:").strip())
SCREAMING_SNAKE_CASE : Optional[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 89 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowercase_ ( _A ):
a_ = """"""
a_ = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
"""simple docstring"""
super().__init__(self , **UpperCamelCase__ )
UpperCAmelCase_ = repo_info
UpperCAmelCase_ = token
UpperCAmelCase_ = None
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
if self.dir_cache is None:
UpperCAmelCase_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase_ = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCamelCase__ ): {"name": str(UpperCamelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "rb" , **UpperCamelCase__ , ) -> Optional[int]:
"""simple docstring"""
if not isinstance(self.repo_info , UpperCamelCase__ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
UpperCAmelCase_ = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha )
return fsspec.open(
UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = self._strip_protocol(UpperCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> str:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = PurePosixPath(path.strip("/" ) )
UpperCAmelCase_ = {}
for p, f in self.dir_cache.items():
UpperCAmelCase_ = PurePosixPath(p.strip("/" ) )
UpperCAmelCase_ = p.parent
if root == path:
UpperCAmelCase_ = f
UpperCAmelCase_ = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
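# A hedged usage sketch for the legacy filesystem above (the class name is
# obfuscated; `HfLegacyFileSystem` is assumed, and the repo id is illustrative):
# from huggingface_hub import HfApi
# repo_info = HfApi().dataset_info("squad")
# fs = HfLegacyFileSystem(repo_info=repo_info)
# fs.ls("", detail=False) # lists the sibling files cached in dir_cache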
| 660 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = KandinskyVaaImgaImgPipeline
lowercase__ : Any = ["image_embeds", "negative_image_embeds", "image"]
lowercase__ : Union[str, Any] = [
"image_embeds",
"negative_image_embeds",
"image",
]
lowercase__ : List[str] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase__ : Union[str, Any] = False
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return 32
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return 32
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return self.time_input_dim
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.time_input_dim * 4
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return 1_00
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
torch.manual_seed(0 )
lowerCAmelCase__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCAmelCase__ = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = self.dummy_unet
lowerCAmelCase__ = self.dummy_movq
lowerCAmelCase__ = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowerCAmelCase__ = DDIMScheduler(**lowerCamelCase_ )
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> Dict:
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**lowerCamelCase_ )
lowerCAmelCase__ = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCAmelCase__ = '''A red cartoon frog, 4k'''
lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCAmelCase__ = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowerCAmelCase__ = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCAmelCase__ = pipeline(
image=lowerCamelCase_ , image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ ) | 90 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 660 | 0 |
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[Any] ) -> Union[str, Any]:
A = name
A = val
def __str__( self : Dict ) -> Tuple:
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self : Union[str, Any] ,A_ : List[str] ) -> str:
return self.val < other.val
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Union[str, Any] ) -> List[str]:
A = {}
A = {}
A = self.build_heap(A_ )
def __getitem__( self : str ,A_ : Dict ) -> Tuple:
return self.get_value(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[int] ) -> List[Any]:
return (idx - 1) // 2
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ) -> Union[str, Any]:
return idx * 2 + 1
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Any ) -> Tuple:
return idx * 2 + 2
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ) -> Any:
return self.heap_dict[key]
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any ) -> int:
A = len(A_ ) - 1
A = self.get_parent_idx(A_ )
for idx, i in enumerate(A_ ):
A = idx
A = i.val
for i in range(A_ ,-1 ,-1 ):
self.sift_down(A_ ,A_ )
return array
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ,A_ : Tuple ) -> int:
while True:
A = self.get_left_child_idx(A_ ) # noqa: E741
A = self.get_right_child_idx(A_ )
A = idx
if l < len(A_ ) and array[l] < array[idx]:
A = l
if r < len(A_ ) and array[r] < array[smallest]:
A = r
if smallest != idx:
A , A = array[smallest], array[idx]
A , A = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
A = smallest
else:
break
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> Tuple:
A = self.get_parent_idx(A_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
A , A = self.heap[idx], self.heap[p]
A , A = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
A = p
A = self.get_parent_idx(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
return self.heap[0]
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A , A = self.heap[-1], self.heap[0]
A , A = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
A = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap )
return x
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Optional[int] ) -> Optional[int]:
self.heap.append(A_ )
A = len(self.heap ) - 1
A = node.val
self.sift_up(len(self.heap ) - 1 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return len(self.heap ) == 0
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> List[Any]:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
A = new_value
A = new_value
self.sift_up(self.idx_of_element[node] )
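# Quick sketch of the basic operations, independent of the decrease-key demo below
# (the method defs above are name-mangled; `get_min` and `remove` are the upstream
# names assumed here):
# heap = MinHeap([Node("a", 5), Node("b", 2), Node("c", 9)])
# heap.get_min().val # -> 2, the root always holds the smallest value
# heap.remove().val # -> 2; swaps root with the last node, pops it, sifts down
# heap.get_min().val # -> 5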
_lowercase = Node('''R''', -1)
_lowercase = Node('''B''', 6)
_lowercase = Node('''A''', 3)
_lowercase = Node('''X''', 1)
_lowercase = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_lowercase = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class lowercase_ ( _A ):
a_ = """linear"""
a_ = """cosine"""
a_ = """cosine_with_restarts"""
a_ = """polynomial"""
a_ = """constant"""
a_ = """constant_with_warmup"""
a_ = """piecewise_constant"""
def lowerCamelCase__ ( A_ , A_ = -1 ):
return LambdaLR(A_ , lambda A_ : 1 , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1.0 , A_ ) )
return 1.0
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = step_rules.split("," )
for rule_str in rule_list[:-1]:
UpperCAmelCase_ , UpperCAmelCase_ = rule_str.split(":" )
UpperCAmelCase_ = int(A_ )
UpperCAmelCase_ = float(A_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = float(rule_list[-1] )
def create_rules_function(A_ , A_ ):
def rule_func(A_ ) -> float:
UpperCAmelCase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(A_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ = create_rules_function(A_ , A_ )
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=-1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 0.5 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(A_ ) * 2.0 * progress )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 1 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(A_ ) * progress) % 1.0) )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=1e-7 , A_=1.0 , A_=-1 ):
UpperCAmelCase_ = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ = lr_init - lr_end
UpperCAmelCase_ = num_training_steps - num_warmup_steps
UpperCAmelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(A_ , A_ , A_ )
__snake_case : str = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 1.0 , A_ = -1 , ):
UpperCAmelCase_ = SchedulerType(A_ )
UpperCAmelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A_ , last_epoch=A_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A_ , step_rules=A_ , last_epoch=A_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A_ , num_warmup_steps=A_ , last_epoch=A_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , num_cycles=A_ , last_epoch=A_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , power=A_ , last_epoch=A_ , )
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , last_epoch=A_ )
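# A minimal usage sketch for the dispatcher above, assuming diffusers' public name
# `get_scheduler` (the defs in this snippet are name-mangled):
def _scheduler_sketch():
    import torch
    from diffusers.optimization import get_scheduler

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    # lr warms up linearly over 100 steps, then decays linearly to 0 at step 1000
    scheduler = get_scheduler(
        "linear", optimizer, num_warmup_steps=100, num_training_steps=1000
    )
    for _ in range(3):
        optimizer.step()
        scheduler.step()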
| 660 | 0 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
UpperCamelCase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
UpperCamelCase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
UpperCamelCase_ = [file for file in filepaths if """ """ in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
UpperCamelCase_ = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
UpperCamelCase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
UpperCamelCase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 92 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 660 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCAmelCase__ :List[str] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=__UpperCAmelCase , cache_dir=__UpperCAmelCase )
lowerCAmelCase__ :int = [t[-1] for t in os.walk(os.path.join(__UpperCAmelCase , os.listdir(__UpperCAmelCase )[0] , 'snapshots' ) )]
lowerCAmelCase__ :Dict = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
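# A compact sketch of the replicate/shard pattern the slow tests below rely on
# (flax utilities only; shapes are illustrative):
def _replicate_shard_sketch():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    n = jax.local_device_count()
    params = {'w': jnp.ones((4,))}
    replicated = replicate(params) # same copy of the pytree on every device
    batch = jnp.zeros((n * 2, 3))
    sharded = shard(batch) # leading axis split: shape becomes (n, 2, 3)
    return replicated, sharded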
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :int = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=__UpperCAmelCase )
lowerCAmelCase__ :int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase__ :List[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase__ :List[Any] = 4
lowerCAmelCase__ :Optional[Any] = jax.device_count()
lowerCAmelCase__ :Any = num_samples * [prompt]
lowerCAmelCase__ :Optional[Any] = pipeline.prepare_inputs(__UpperCAmelCase )
# shard inputs and rng
lowerCAmelCase__ :Optional[Any] = replicate(__UpperCAmelCase )
lowerCAmelCase__ :int = jax.random.split(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Dict = shard(__UpperCAmelCase )
lowerCAmelCase__ :int = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
lowerCAmelCase__ :Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__UpperCAmelCase ) == num_samples
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase__ :Optional[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase__ :Any = 5_0
lowerCAmelCase__ :Optional[int] = jax.device_count()
lowerCAmelCase__ :Union[str, Any] = num_samples * [prompt]
lowerCAmelCase__ :Dict = pipeline.prepare_inputs(__UpperCAmelCase )
# shard inputs and rng
lowerCAmelCase__ :Optional[Any] = replicate(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = jax.random.split(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :str = shard(__UpperCAmelCase )
lowerCAmelCase__ :int = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase__ :Optional[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase__ :Tuple = 5_0
lowerCAmelCase__ :List[Any] = jax.device_count()
lowerCAmelCase__ :Tuple = num_samples * [prompt]
lowerCAmelCase__ :str = pipeline.prepare_inputs(__UpperCAmelCase )
# shard inputs and rng
lowerCAmelCase__ :List[Any] = replicate(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = jax.random.split(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = shard(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
lowerCAmelCase__ :Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase__ :Optional[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase__ :Any = 5_0
lowerCAmelCase__ :Optional[int] = jax.device_count()
lowerCAmelCase__ :Tuple = num_samples * [prompt]
lowerCAmelCase__ :Any = pipeline.prepare_inputs(__UpperCAmelCase )
# shard inputs and rng
lowerCAmelCase__ :Any = replicate(__UpperCAmelCase )
lowerCAmelCase__ :Any = jax.random.split(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :int = shard(__UpperCAmelCase )
lowerCAmelCase__ :Dict = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , )
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , )
lowerCAmelCase__ :int = scheduler.create_state()
lowerCAmelCase__ :Tuple = scheduler_state
lowerCAmelCase__ :Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase__ :List[str] = jax.random.PRNGKey(0 )
lowerCAmelCase__ :Union[str, Any] = 5_0
lowerCAmelCase__ :Any = jax.device_count()
lowerCAmelCase__ :Tuple = num_samples * [prompt]
lowerCAmelCase__ :List[str] = pipeline.prepare_inputs(__UpperCAmelCase )
# shard inputs and rng
lowerCAmelCase__ :Any = replicate(__UpperCAmelCase )
lowerCAmelCase__ :Any = jax.random.split(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[str] = shard(__UpperCAmelCase )
lowerCAmelCase__ :Dict = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCAmelCase__ :List[str] = jax.device_count()
lowerCAmelCase__ :str = num_samples * [prompt]
lowerCAmelCase__ :str = jax.random.split(jax.random.PRNGKey(0 ) , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ :Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , )
lowerCAmelCase__ :int = replicate(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = pipeline.prepare_inputs(__UpperCAmelCase )
lowerCAmelCase__ :str = shard(__UpperCAmelCase )
lowerCAmelCase__ :Dict = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCAmelCase__ :Union[str, Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCAmelCase__ , lowerCAmelCase__ :str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , use_memory_efficient_attention=__UpperCAmelCase , )
lowerCAmelCase__ :List[str] = replicate(__UpperCAmelCase )
lowerCAmelCase__ :Dict = pipeline.prepare_inputs(__UpperCAmelCase )
lowerCAmelCase__ :int = shard(__UpperCAmelCase )
lowerCAmelCase__ :int = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCAmelCase__ :Optional[Any] = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
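# Note on the jit=True pattern the tests above exercise (a sketch of the
# standard Flax data-parallel recipe, not a pipeline-specific guarantee):
# params are replicated across devices with `replicate`, prompt ids and rngs
# get a leading device axis via `shard` / `jax.random.split`, and the call is
# pmapped over that axis; this is why every expected image shape is
# (num_samples, 1, 512, 512, 3) with num_samples == jax.device_count().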
| 93 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case : Union[str, Any] = ''''''
__snake_case : List[Any] = ''''''
__snake_case : List[str] = ''''''
__snake_case : Any = ''''''
def lowerCamelCase__ ( A_ ):
# authorize twitter, initialize tweepy
UpperCAmelCase_ = tweepy.OAuthHandler(A_ , A_ )
auth.set_access_token(A_ , A_ )
UpperCAmelCase_ = tweepy.API(A_ )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase_ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase_ = api.user_timeline(screen_name=A_ , count=200 )
# save most recent tweets
alltweets.extend(A_ )
# save the id of the oldest tweet less one
UpperCAmelCase_ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(A_ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase_ = api.user_timeline(
screen_name=A_ , count=200 , max_id=A_ )
# save most recent tweets
alltweets.extend(A_ )
# update the id of the oldest tweet less one
UpperCAmelCase_ = alltweets[-1].id - 1
print(F"""...{len(A_ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , "w" ) as f:
UpperCAmelCase_ = csv.writer(A_ )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(A_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
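# Library-free sketch of the max_id pagination pattern used above: each
# follow-up request asks only for ids at or below the last seen id minus one,
# so no tweet is fetched twice. `fake_timeline` is a stand-in for
# `api.user_timeline` (illustrative only, not the tweepy API).
def fake_timeline(max_id=None, count=3):
    ids = [10, 9, 8, 7, 6, 5]
    page = [i for i in ids if max_id is None or i <= max_id]
    return page[:count]

collected = fake_timeline()
page = collected
while page:
    page = fake_timeline(max_id=collected[-1] - 1)
    collected.extend(page)
assert collected == [10, 9, 8, 7, 6, 5]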
| 660 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : int | None = None ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =value
lowercase : Tuple =random()
lowercase : Node | None =None
lowercase : Node | None =None
def __repr__( self : Tuple ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
def __str__( self : Tuple ) -> str:
'''simple docstring'''
lowercase : Dict =str(self.value ) + ''' '''
lowercase : Optional[Any] =str(self.left or '''''' )
lowercase : int =str(self.right or '''''' )
return value + left + right
def lowercase_ ( __A : Node | None , __A : int ) -> tuple[Node | None, Node | None]:
"""simple docstring"""
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase , lowercase : Optional[Any] =split(root.left , __A )
return left, root
else:
lowercase , lowercase : List[str] =split(root.right , __A )
return root, right
def lowercase_ ( __A : Node | None , __A : Node | None ) -> Node | None:
"""simple docstring"""
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase : Union[str, Any] =merge(left.right , __A )
return left
else:
lowercase : Union[str, Any] =merge(__A , right.left )
return right
def lowercase_ ( __A : Node | None , __A : int ) -> Node | None:
"""simple docstring"""
lowercase : Union[str, Any] =Node(__A )
lowercase , lowercase : Union[str, Any] =split(__A , __A )
return merge(merge(__A , __A ) , __A )
def lowercase_ ( __A : Node | None , __A : int ) -> Node | None:
"""simple docstring"""
lowercase , lowercase : str =split(__A , value - 1 )
lowercase , lowercase : Optional[Any] =split(__A , __A )
return merge(__A , __A )
def lowercase_ ( __A : Node | None ) -> None:
"""simple docstring"""
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def lowercase_ ( __A : Node | None , __A : str ) -> Node | None:
"""simple docstring"""
for arg in args.split():
if arg[0] == "+":
lowercase : Tuple =insert(__A , int(arg[1:] ) )
elif arg[0] == "-":
lowercase : Optional[int] =erase(__A , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def lowercase_ ( ) -> None:
"""simple docstring"""
lowercase : List[Any] =None
print(
'''enter numbers to create a tree, "+ value" to add a value to the treap, '''
'''"- value" to erase all nodes with that value. \'q\' to quit. ''' )
lowercase : Optional[int] =input()
while args != "q":
lowercase : Tuple =interact_treap(__A , __A )
print(__A )
lowercase : List[str] =input()
print('''goodbye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
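# Conceptual check of the split/merge algebra the treap above relies on:
# split(T, v) keeps keys <= v on the left (which is why erase first splits at
# value - 1), merge concatenates trees with disjoint key ranges, and insertion
# is merge(merge(left, node), right). The sorted-list analogy below mirrors
# that contract (illustrative only, not the treap itself).
def split_list(xs, value):
    return [x for x in xs if x <= value], [x for x in xs if x > value]

left, right = split_list([1, 3, 5, 7], 4)
assert (left, right) == ([1, 3], [5, 7])
assert left + [4] + right == [1, 3, 4, 5, 7]  # insert 4 between the halves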
| 94 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__snake_case : int = logging.get_logger(__name__)
class lowercase_ ( _A ):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["bs4"] )
super().__init__(**UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCAmelCase_ = parent.find_all(child.name , recursive=UpperCamelCase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(UpperCamelCase__ ) else next(i for i, s in enumerate(UpperCamelCase__ , 1 ) if s is child ) )
UpperCAmelCase_ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = BeautifulSoup(UpperCamelCase__ , "html.parser" )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for element in html_code.descendants:
if type(UpperCamelCase__ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCAmelCase_ = html.unescape(UpperCamelCase__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = self.xpath_soup(UpperCamelCase__ )
stringaxtag_seq.append(UpperCamelCase__ )
stringaxsubs_seq.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = ""
for tagname, subs in zip(UpperCamelCase__ , UpperCamelCase__ ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , UpperCamelCase__ ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ = False
# Check that strings has a valid type
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = True
elif isinstance(UpperCamelCase__ , (list, tuple) ):
if len(UpperCamelCase__ ) == 0 or isinstance(html_strings[0] , UpperCamelCase__ ):
UpperCAmelCase_ = True
if not valid_strings:
raise ValueError(
"HTML strings must be of type `str` or `List[str]` (batch of examples), "
F"""but got {type(UpperCamelCase__ )}.""" )
UpperCAmelCase_ = isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(html_strings[0] , UpperCamelCase__ )
if not is_batched:
UpperCAmelCase_ = [html_strings]
# Get nodes + xpaths
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for html_string in html_strings:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.get_three_from_single(UpperCamelCase__ )
nodes.append(UpperCamelCase__ )
UpperCAmelCase_ = []
for node, tag_list, sub_list in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.construct_xpath(UpperCamelCase__ , UpperCamelCase__ )
xpath_strings.append(UpperCamelCase__ )
xpaths.append(UpperCamelCase__ )
# return as Dict
UpperCAmelCase_ = {"nodes": nodes, "xpaths": xpaths}
UpperCAmelCase_ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
return encoded_inputs
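# Standalone illustration of the xpath string rule implemented above: tag
# names are joined with '/' and a subscript [n] is appended whenever the node
# is not an only child of its kind (a subscript of 0 means "omit").
xpath_tags = ["html", "body", "div", "p"]
xpath_subs = [0, 0, 0, 2]
xpath = ""
for tag, sub in zip(xpath_tags, xpath_subs):
    xpath += f"/{tag}"
    if sub != 0:
        xpath += f"[{sub}]"
assert xpath == "/html/body/div/p[2]"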
| 660 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase_ = None
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
lowerCamelCase_ = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
lowerCamelCase_ = '''▁'''
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ['''input_ids''', '''token_type_ids''']
__magic_name__ = FNetTokenizer
def __init__( self : List[str] , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Dict="<pad>" , lowerCAmelCase_ : Optional[Any]="[CLS]" , lowerCAmelCase_ : Tuple="[MASK]" , **lowerCAmelCase_ : Optional[int] , ) -> Optional[Any]:
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[Any] = (
AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ , normalized=lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else mask_token
)
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase_ : int = do_lower_case
UpperCAmelCase_ : Any = remove_space
UpperCAmelCase_ : Any = keep_accents
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : str = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Dict = [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Any = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
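# Illustration of the special-token layout the two methods above produce for
# FNet-style sequence pairs: [CLS] A [SEP] B [SEP], with token_type_ids 0
# covering the first segment (including both leading specials) and 1 the
# second. Ids below are placeholders, not the real vocabulary.
cls_id, sep_id = 101, 102
ids_a, ids_b = [7, 8], [9]
pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
assert pair == [101, 7, 8, 102, 9, 102]
assert type_ids == [0, 0, 0, 0, 1, 1]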
| 95 | '''simple docstring'''
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(A_ , A_ ) ) )
def lowerCamelCase__ ( A_ ):
if point:
if isinstance(A_ , A_ ):
for item in point:
if not isinstance(A_ , (int, float) ):
UpperCAmelCase_ = (
"Expected a list of numbers as input, found "
F"""{type(A_ ).__name__}"""
)
raise TypeError(A_ )
else:
UpperCAmelCase_ = F"""Expected a list of numbers as input, found {type(A_ ).__name__}"""
raise TypeError(A_ )
else:
raise ValueError("Missing an input" )
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(A_ , A_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
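# Worked example of the distance both functions above compute (they share the
# same Manhattan/taxicab formula and differ only in how validation is spelled):
# for (1, 1) and (4, 5), |1 - 4| + |1 - 5| = 3 + 4 = 7.
point_a, point_b = [1, 1], [4, 5]
assert sum(abs(a - b) for a, b in zip(point_a, point_b)) == 7.0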
| 660 | 0 |
"""simple docstring"""
def a ( __UpperCAmelCase : list[int] ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__magic_name__: Dict = sum(__UpperCAmelCase ) / len(__UpperCAmelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
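# Worked check of the average-absolute-deviation formula above: for
# [1, 2, 3, 4] the mean is 2.5 and sum(|x - 2.5|) = 1.5 + 0.5 + 0.5 + 1.5 = 4,
# so the deviation is 4 / 4 = 1.0 (self-contained recomputation).
nums = [1, 2, 3, 4]
average = sum(nums) / len(nums)
assert sum(abs(x - average) for x in nums) / len(nums) == 1.0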
| 96 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase__ ( A_ , A_ ):
# save results
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , "config.json" ) ) and os.path.isfile(
os.path.join(A_ , "config.json" ) ):
os.remove(os.path.join(A_ , "config.json" ) )
if os.path.exists(os.path.join(A_ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(A_ , "pytorch_model.bin" ) ):
os.remove(os.path.join(A_ , "pytorch_model.bin" ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def lowerCamelCase__ ( A_ , A_=False ):
UpperCAmelCase_ = 2
if unlogit:
UpperCAmelCase_ = torch.pow(A_ , A_ )
UpperCAmelCase_ = p * torch.log(A_ )
UpperCAmelCase_ = 0
return -plogp.sum(dim=-1 )
def lowerCamelCase__ ( A_ ):
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
UpperCAmelCase_ , UpperCAmelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
UpperCAmelCase_ = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
# If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0.0
for step, inputs in enumerate(tqdm(A_ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ = tuple(t.to(args.device ) for t in inputs )
(UpperCAmelCase_ , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase_ = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
UpperCAmelCase_ = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase_ = 2
UpperCAmelCase_ = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(A_ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(A_ )
logger.info("Head ranked by importance scores" )
UpperCAmelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( A_ , A_ , A_ ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
UpperCAmelCase_ = 1 / loss # use the inverse LM loss in place of a downstream score
logger.info("Pruning: original score: %f, threshold: %f" , A_ , original_score * args.masking_threshold )
UpperCAmelCase_ = torch.ones_like(A_ )
UpperCAmelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase_ = float("Inf" )
UpperCAmelCase_ = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase_ = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ = new_head_mask.view(-1 )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = new_head_mask.view_as(A_ )
UpperCAmelCase_ = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( A_ , A_ , A_ , A_ ):
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
UpperCAmelCase_ = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , A_ , A_ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , A_ , A_ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(A_ , args.output_dir )
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=A_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=A_ , type=A_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=A_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=A_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=A_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=A_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=A_ , help="Batch size." )
parser.add_argument("--seed" , type=A_ , default=42 )
parser.add_argument("--local_rank" , type=A_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=A_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=A_ , default="" , help="Can be used for distant debugging." )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device("cuda" , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , A_ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ = (torch.from_numpy(A_ ),)
UpperCAmelCase_ = TensorDataset(*A_ )
UpperCAmelCase_ = RandomSampler(A_ )
UpperCAmelCase_ = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
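# Minimal numpy sketch of the masking step implemented above: heads are
# ranked by accumulated importance and the weakest `num_to_mask` are zeroed
# out each round until the score drops below threshold * original. The
# 2-layer x 4-head importance matrix below is made up for illustration.
import numpy as np

head_importance = np.array([[0.9, 0.1, 0.5, 0.2],
                            [0.3, 0.8, 0.05, 0.6]])
num_to_mask = 2
order = head_importance.flatten().argsort()  # least important heads first
mask = np.ones(head_importance.size)
mask[order[:num_to_mask]] = 0.0
mask = mask.reshape(head_importance.shape)
assert mask[1, 2] == 0.0 and mask[0, 1] == 0.0  # 0.05 and 0.1 are masked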
| 660 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
def a ( snake_case__: np.ndarray , snake_case__: Union[int, Iterable[int]] , snake_case__: bool , snake_case__: int ):
'''simple docstring'''
def constraint_to_multiple_of(snake_case__: Any , snake_case__: str , snake_case__: Union[str, Any]=0 , snake_case__: str=None ):
lowercase_ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowercase_ = math.floor(val / multiple ) * multiple
if x < min_val:
lowercase_ = math.ceil(val / multiple ) * multiple
return x
lowercase_ = (output_size, output_size) if isinstance(snake_case__ , snake_case__ ) else output_size
lowercase_ , lowercase_ = get_image_size(snake_case__ )
lowercase_ , lowercase_ = output_size
# determine new height and width
lowercase_ = output_height / input_height
lowercase_ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowercase_ = scale_width
else:
# fit height
lowercase_ = scale_height
lowercase_ = constraint_to_multiple_of(scale_height * input_height , multiple=snake_case__ )
lowercase_ = constraint_to_multiple_of(scale_width * input_width , multiple=snake_case__ )
return (new_height, new_width)
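# Worked check of the aspect-preserving rule above: the branch keeps the
# scale closest to 1 ("scale as little as possible"), so a 480x640 input
# targeted at 384x384 uses scale_height = 0.8 and lands on 384x512. The
# helper below recomputes only that branch (multiple-of rounding omitted).
def _expected_size(in_h, in_w, out_h, out_w):
    scale_h, scale_w = out_h / in_h, out_w / in_w
    scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
    return round(scale * in_h), round(scale * in_w)

assert _expected_size(480, 640, 384, 384) == (384, 512)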
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Union[str, Any] = ['pixel_values']
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : int , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4}
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = keep_aspect_ratio
lowercase_ = ensure_multiple_of
lowercase_ = resample
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Any , ) -> np.ndarray:
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase_ = get_resize_output_image_size(
SCREAMING_SNAKE_CASE_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=SCREAMING_SNAKE_CASE_ , multiple=SCREAMING_SNAKE_CASE_ , )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, float] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> List[Any]:
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : float = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> PIL.Image.Image:
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
lowercase_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Tuple] = None ) -> Dict:
lowercase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
lowercase_ = target_sizes.numpy()
lowercase_ = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowercase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE_ )
lowercase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = logits.argmax(dim=1 )
lowercase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 97 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case : str = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=A_ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=A_ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=A_ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=A_ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase_ = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase_ = fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(A_ )} examples to process.""" )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = 10_000
UpperCAmelCase_ = time.time()
for text in data:
UpperCAmelCase_ = F"""{bos} {text.strip()} {sep}"""
UpperCAmelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
rslt.append(A_ )
iter += 1
if iter % interval == 0:
UpperCAmelCase_ = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
UpperCAmelCase_ = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(A_ )} examples processed.""" )
UpperCAmelCase_ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
UpperCAmelCase_ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase_ = [np.uintaa(A_ ) for d in rslt]
else:
UpperCAmelCase_ = [np.intaa(A_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(A_ , "wb" ) as handle:
pickle.dump(rslt_ , A_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
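# Why the dtype branch above halves storage: token ids are strictly less than
# vocab_size, so when vocab_size < 2**16 every id fits in a 16-bit unsigned
# integer (assumed to be the narrow dtype chosen above) instead of a 32-bit
# one. Quick self-contained check with an illustrative vocabulary size:
import numpy as np

vocab_size = 30_522  # roughly a BERT-base-sized vocabulary
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
assert dtype is np.uint16
assert int(np.uint16(vocab_size - 1)) == vocab_size - 1  # max id round-trips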
| 660 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : List[Any] = 'Wav2Vec2FeatureExtractor'
_snake_case : List[Any] = 'AutoTokenizer'
def __init__( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> Tuple:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self.feature_extractor
_UpperCamelCase = False
@classmethod
def snake_case__ ( cls : str , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
try:
return super().from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , lowerCAmelCase__ , )
_UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = WavaVecaCTCTokenizer.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
return cls(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
def __call__( self : Optional[int] , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_UpperCamelCase = kwargs.pop('''raw_speech''' )
else:
_UpperCamelCase = kwargs.pop('''audio''' , lowerCAmelCase__ )
_UpperCamelCase = kwargs.pop('''sampling_rate''' , lowerCAmelCase__ )
_UpperCamelCase = kwargs.pop('''text''' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
_UpperCamelCase = args[0]
_UpperCamelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_UpperCamelCase = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
_UpperCamelCase = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_UpperCamelCase = encodings['''input_ids''']
return inputs
def snake_case__ ( self : Optional[Any] , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = kwargs.pop('''input_features''' , lowerCAmelCase__ )
_UpperCamelCase = kwargs.pop('''labels''' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
_UpperCamelCase = args[0]
_UpperCamelCase = args[1:]
if input_features is not None:
_UpperCamelCase = self.feature_extractor.pad(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
if labels is not None:
_UpperCamelCase = self.tokenizer.pad(lowerCAmelCase__ , **lowerCAmelCase__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_UpperCamelCase = labels['''input_ids''']
return input_features
def snake_case__ ( self : str , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : int , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@contextmanager
def snake_case__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
_UpperCamelCase = True
_UpperCamelCase = self.tokenizer
yield
_UpperCamelCase = self.feature_extractor
_UpperCamelCase = False
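# Typical call pattern for a processor like the one above (a hedged sketch
# following the argument names of __call__ above, not any specific
# checkpoint): audio routes to the feature extractor and text to the
# tokenizer, exactly the branching implemented in __call__.
#
#     inputs = processor(audio=waveform, sampling_rate=16_000,
#                        return_tensors="pt")
#     labels = processor(text="a transcription").input_ids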
| 98 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case : Dict = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case : str = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FSMTForConditionalGeneration.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = F"""facebook/wmt19-{pair}"""
UpperCAmelCase_ = self.get_tokenizer(UpperCamelCase__ )
UpperCAmelCase_ = self.get_model(UpperCamelCase__ )
UpperCAmelCase_ = bleu_data[pair]["src"]
UpperCAmelCase_ = bleu_data[pair]["tgt"]
UpperCAmelCase_ = tokenizer(UpperCamelCase__ , return_tensors="pt" , truncation=UpperCamelCase__ , padding="longest" ).to(UpperCamelCase__ )
UpperCAmelCase_ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase_ = tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
UpperCAmelCase_ = calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertGreaterEqual(scores["bleu"] , UpperCamelCase__ )
| 660 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'camembert-base': 5_1_2,
}
SCREAMING_SNAKE_CASE = '▁'
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["""input_ids""", """attention_mask"""]
_lowerCamelCase = CamembertTokenizer
def __init__( self , __A=None , __A=None , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=["<s>NOTUSED", "</s>NOTUSED"] , **__A , ):
# The mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , additional_special_tokens=__A , **__A , )
__a = vocab_file
__a = False if not self.vocab_file else True
def snake_case_ ( self , __A , __A = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self , __A , __A = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , __A , __A = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,)
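# Illustration of the RoBERTa-style pair layout built above, which uses a
# double separator: <s> A </s></s> B </s>. Ids below are placeholders only.
cls_id, sep_id = 5, 6
ids_a, ids_b = [11, 12], [13]
pair = [cls_id] + ids_a + [sep_id] + [sep_id] + ids_b + [sep_id]
assert pair == [5, 11, 12, 6, 6, 13, 6]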
| 99 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__snake_case : List[Any] = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Optional[int] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : int = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Dict = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : Tuple = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : str = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def lowerCamelCase__ ( A_ ):
if isinstance(A_ , A_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=False ):
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=None ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
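    # Hedged usage sketch (added; not part of the original script). The saved pipeline can
    # be reloaded and sampled with the standard diffusers API, e.g.:
    #
    #   from diffusers import ConsistencyModelPipeline
    #   pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    #   image = pipe(num_inference_steps=1).images[0]  # one-step consistency sampling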
| 660 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
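        # Added note (illustration, not part of the original test): the tokenizer reserves
        # ids 0-3 for <s>/<pad>/</s>/<unk>, so the first monolingual vocab entry "▁This"
        # maps to id 4. That is why input_bpe_tokens above starts at 4 and the pieces
        # missing from the monolingual vocab ("▁l", "à") map to the <unk> id 3.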
| 100 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
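# Hedged usage sketch (added; not part of the generated file). The builder registers the
# message classes in this module's globals, so a serialized SentencePiece model can be
# parsed like any other protobuf message:
#
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.model_type, len(m.pieces))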
| 660 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
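        # Added note (illustration, not part of the original test): XLMRobertaTokenizer
        # keeps fairseq's id layout, where ids 0-3 are <s>/<pad>/</s>/<unk> and every raw
        # SentencePiece id is shifted by tokenizer.fairseq_offset. That is why the id
        # assertions above add the offset, and why the digit "9" (absent from the tiny
        # fixture vocab) round-trips through the unknown id 3 back to "<unk>".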
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE_ : Tuple = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            # NOTE: the expected encoding is bound to the scrambled name on the long fixture line above
            expected_encoding=SCREAMING_SNAKE_CASE_,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
| 101 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
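# Hedged usage note (added; not part of the original test): tests marked @slow are skipped
# by default and only run when the environment opts in, e.g.
#   RUN_SLOW=1 pytest tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py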
| 660 | 0 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
    eval_freq=100, igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
    recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
        eval_freq=100, igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
        recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
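# Hedged usage sketch (added; not part of the original script): a typical invocation,
# assuming the pre-tokenized WikiText data was produced beforehand (the flags come from
# the parser above; note that main() currently calls the pipeline with hardcoded values):
#
#   python run_clm_igf.py \
#       --data_dir ./data \
#       --model_name_or_path gpt2 \
#       --output_dir ./igf_output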
| 102 | '''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
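    # Hedged usage sketch (added; not part of the original script): the dumped folder can
    # then be loaded with the regular transformers API:
    #
    #   from transformers import GPT2Model
    #   model = GPT2Model.from_pretrained("path/to/pytorch_dump_folder")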
| 660 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
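# Hedged usage note (added; not part of the original __init__): because the module object
# in sys.modules is replaced by a _LazyModule, the heavy torch/flax imports above only run
# when an attribute is first accessed:
#
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel
#   # modeling_speech_encoder_decoder is imported at this access, not at package import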
| 103 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that adds the key handlers to the class
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        "Finds and returns the selected character if it exists in the handler"
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
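# Hedged usage sketch (added; hypothetical class, not part of the original module): a menu
# registers per-key callbacks by marking methods and applying the KeyHandler metaclass:
#
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#
#   menu = Menu()
#   menu.handle_input()  # reads one key press and dispatches to the marked method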
| 660 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
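    # Added note (illustration, not part of the original test): the merges file lists BPE
    # merge rules in priority order, so "lower" in test_full_tokenizer is tokenized by
    # merging l+o -> "lo", then lo+w -> "low", and e+r</w> -> "er</w>", leaving
    # ["low", "er</w>"] as asserted above.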
| 104 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
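        # Added note (illustration, not part of the original test): Autoformer's
        # decomposition layer splits the context window into a seasonal part and a trend
        # part (a moving average). The decoder receives the seasonal tail padded with zeros
        # as inputs_embeds and the trend tail padded with the context mean as the trend
        # initialization, which is exactly what the tensors built above reproduce outside
        # the model to verify the standalone encoder/decoder round trip.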
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt" ):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=filename , repo_type="dataset" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
        UpperCAmelCase_ = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
        UpperCAmelCase_ = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
| 660 | 0 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 105 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__snake_case : Dict = {
    '''abeja/gpt-neox-japanese-2.7b''': 2048,
}
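# Parses the emoji JSON and the comma-separated vocab file; every surface form listed on a vocab line maps to the same token id.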
def load_vocab_and_emoji( vocab_file , emoji_file ):
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    # a line is kept whole unless it contains commas; comma-separated entries are variant spellings of the same token
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> int:
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ) -> Dict:
        """simple docstring"""
        return len(self.raw_vocab )
    def get_vocab( self ) -> Optional[Any]:
        """simple docstring"""
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ) -> Union[str, Any]:
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> List[Any]:
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ) -> Optional[int]:
        """simple docstring"""
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object ):
    def __init__( self , vocab , ids_to_tokens , emoji ) -> Optional[int]:
        """simple docstring"""
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        # pre-compiled patterns used by clean_text to mask URLs, e-mails, phone numbers, dates and prices
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text( self , content ) -> Union[str, Any]:
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>" , content )
        content = self.content_repatter2.sub("<EMAIL>" , content )
        content = self.content_repatter3.sub("<TEL>" , content )
        content = self.content_repatter4.sub("<DATE>" , content )
        content = self.content_repatter5.sub("<DATE>" , content )
        content = self.content_repatter6.sub("<PRICE>" , content )
        content = content.translate(self.content_trans1 )
        # collapse runs of consecutive <BLOCK> placeholders into a single one
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content
    def tokenize( self , text , clean=False ) -> List[Any]:
        """simple docstring"""
        text = text.replace(" " , "<SP>" )
        text = text.replace("\u3000" , "<SP>" )  # full-width (ideographic) space
        text = text.replace("\r\n" , "<BR>" )
        text = text.replace("\n" , "<BR>" )
        text = text.replace("\r" , "<BR>" )
        text = text.replace("\t" , "<TAB>" )
        text = text.replace("—" , "ー" )
        text = text.replace("−" , "ー" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False
        def checku2e(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            # special "<...>" tokens may span up to maxlen characters; plain text is matched up to 3 characters
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append("<KIGOU>" )
                elif checku2e(wd ):
                    result.append("<U2000U2BFF>" )
                else:
                    for i in wd.encode("utf-8" ):
                        result.append("<|byte%d|>" % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ) -> Optional[Any]:
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word] )
            elif word == "<SP>":
                words.append(" " )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append("\t" )
            elif word == "<BLOCK>":
                words.append("▀" )
            elif word == "<KIGOU>":
                words.append("ǀ" )
            elif word == "<U2000U2BFF>":
                words.append("‖" )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
        text = "".join(words )
        return text
| 660 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Any:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text] # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['image'] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ) -> str:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Dict:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> List[Any]:
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ) -> List[str]:
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ) -> Optional[Any]:
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        # newest comment first, so comments[0] is the latest activity on the issue
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 | 0 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
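# Make CUDA/cuDNN kernels deterministic so the numeric assertions below are reproducible across runs.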
enable_full_determinism()
class lowercase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = UNetaDModel
__lowerCAmelCase = "sample"
@property
def __UpperCAmelCase ( self : Dict ) -> Any:
_A = 4
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
_A = torch.tensor([10] ).to(UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCAmelCase ( self : Any ) -> Any:
return (3, 32, 32)
@property
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
return (3, 32, 32)
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_A = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
_A = self.dummy_input
return init_dict, inputs_dict
class lowercase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = UNetaDModel
__lowerCAmelCase = "sample"
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
_A = 4
_A = 4
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
_A = torch.tensor([10] ).to(UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCAmelCase ( self : int ) -> Tuple:
return (4, 32, 32)
@property
def __UpperCAmelCase ( self : int ) -> List[Any]:
return (4, 32, 32)
def __UpperCAmelCase ( self : Any ) -> List[Any]:
_A = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
_A = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_A , _A = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ), 0 )
model.to(UpperCamelCase__ )
_A = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU' )
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_A , _A = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=UpperCamelCase__ )
model.to(UpperCamelCase__ )
_A = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU' )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # by default, model loading will use accelerate as `low_cpu_mem_usage=True`
_A , _A = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=UpperCamelCase__ )
model_accelerate.to(UpperCamelCase__ )
model_accelerate.eval()
_A = torch.randn(
1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0 ), )
_A = noise.to(UpperCamelCase__ )
_A = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
_A = model_accelerate(UpperCamelCase__, UpperCamelCase__ )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_A , _A = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update', output_loading_info=UpperCamelCase__, low_cpu_mem_usage=UpperCamelCase__ )
model_normal_load.to(UpperCamelCase__ )
model_normal_load.eval()
_A = model_normal_load(UpperCamelCase__, UpperCamelCase__ )['sample']
assert torch_all_close(UpperCamelCase__, UpperCamelCase__, rtol=1e-3 )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
_A = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(UpperCamelCase__ )
_A = torch.randn(
1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), )
_A = noise.to(UpperCamelCase__ )
_A = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
with torch.no_grad():
_A = model(UpperCamelCase__, UpperCamelCase__ ).sample
_A = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__, UpperCamelCase__, rtol=1e-3 ) )
class lowercase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = UNetaDModel
__lowerCAmelCase = "sample"
@property
def __UpperCAmelCase ( self : Any, UpperCamelCase__ : List[Any]=(32, 32) ) -> Optional[Any]:
_A = 4
_A = 3
_A = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
_A = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa, device=UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def __UpperCAmelCase ( self : Any ) -> int:
return (3, 32, 32)
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
return (3, 32, 32)
def __UpperCAmelCase ( self : str ) -> Tuple:
_A = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1e-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
_A = self.dummy_input
return init_dict, inputs_dict
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
_A , _A = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256', output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ), 0 )
model.to(UpperCamelCase__ )
_A = self.dummy_input
_A = floats_tensor((4, 3) + (2_56, 2_56) ).to(UpperCamelCase__ )
_A = noise
_A = model(**UpperCamelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
_A = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(UpperCamelCase__ )
_A = 4
_A = 3
_A = (2_56, 2_56)
_A = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
_A = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
with torch.no_grad():
_A = model(UpperCamelCase__, UpperCamelCase__ ).sample
_A = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_A = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__, UpperCamelCase__, rtol=1e-2 ) )
def __UpperCAmelCase ( self : List[str] ) -> Dict:
_A = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(UpperCamelCase__ )
_A = 4
_A = 3
_A = (32, 32)
_A = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
_A = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
with torch.no_grad():
_A = model(UpperCamelCase__, UpperCamelCase__ ).sample
_A = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_A = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__, UpperCamelCase__, rtol=1e-2 ) )
def __UpperCAmelCase ( self : str ) -> Tuple:
# not required for this model
pass
| 107 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig ):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
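        # download_and_extract handles local paths, URLs and archives alike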
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
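        # when the user restricted the columns, they must exactly match the declared features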
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
| 660 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ) -> int:
        """simple docstring"""
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
def lowerCamelCase ( self : Any ) -> str:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
def lowerCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
def lowerCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """clusters""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_normalize""" ) )
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
_UpperCAmelCase = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCamelCase )
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(lowerCamelCase , """image_processor.json""" )
image_processor_first.to_json_file(lowerCamelCase )
_UpperCAmelCase = self.image_processing_class.from_json_file(lowerCamelCase ).to_dict()
_UpperCAmelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase )
def lowerCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase )
_UpperCAmelCase = self.image_processing_class.from_pretrained(lowerCamelCase ).to_dict()
_UpperCAmelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
pass
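# Loads two sample images from the internal fixtures dataset for the slow integration test below.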
def prepare_images() -> Dict:
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
    image1 = Image.open(dataset[4]["""file"""] )
    image2 = Image.open(dataset[5]["""file"""] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
_UpperCAmelCase = prepare_images()
# test non-batched
_UpperCAmelCase = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
_UpperCAmelCase = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase )
# test batched
_UpperCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
_UpperCAmelCase = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase ) | 108 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''spiece.model'''}
__snake_case : Dict = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__snake_case : Tuple = {
    '''AI-Sweden/gpt-sw3-126m''': 2048,
    '''AI-Sweden/gpt-sw3-350m''': 2048,
    '''AI-Sweden/gpt-sw3-1.6b''': 2048,
    '''AI-Sweden/gpt-sw3-6.7b''': 2048,
    '''AI-Sweden/gpt-sw3-20b''': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored" )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # this variant has no dedicated pad/bos tokens, so fall back to unk/eos
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
    def __getstate__( self ) -> Optional[int]:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> List[str]:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model )
    def preprocess_text( self , text ) -> str:
        """simple docstring"""
        text = self.non_printing_characters_re.sub("" , text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC" , text )
        return text
    def _tokenize( self , text , **kwargs ) -> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization( out_string ) -> str:
        """simple docstring"""
        return out_string
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        # accepts a single string or a list of strings; optionally returns a torch tensor
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids ) -> str:
        """simple docstring"""
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        all_responses = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(all_responses ) + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt )
| 660 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
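# Framework-specific modeling modules are appended below only when the corresponding backend is installed.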
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 660 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
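    # Note: the `_test_*` helpers called above come from PipelineTesterMixin /
    # IFPipelineTesterMixin; this class only wires them up with IF-specific tolerances.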
| 533 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
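# Sanity-check sketch (not part of the original file): the arc length of the
# straight line y = x from 0 to 1 is sqrt(2) ~= 1.4142, so
# line_length(lambda x: x, 0.0, 1.0) should return ~1.4142 for any step count.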
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)

    print('''f(x) = sin(10 * x)''')
    print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
    while i <= 100_000:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 660 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)
def eval_data_dir(
    data_dir, save_dir, model_name, bs=8, max_source_length=1024, type_path="val", n_obs=None, fp16=False,
    task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs=None, prefix="", **generate_kwargs,
):
    """Run evaluation on one GPU and save predictions to {save_dir}/rank_{local_rank}_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs,
        prefix=prefix, **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
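# Usage note (assumption, mirroring the upstream distributed-eval script): this
# function is meant to run once per GPU, e.g. launched via
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py --model_name ... --data_dir ...
# so that each rank writes its own rank_{local_rank}_output.json shard.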
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_path}, intermediate in {json_save_dir}/")
            save_json(preds, pseudolabel_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
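    # Note: the temporary rank_*.json shards are deleted above unless --debug is
    # passed, in which case the reference targets are also dumped for inspection.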
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
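# Illustrative shape (assumption): partial_results is a list of per-rank lists such as
# [[{"pred": "...", "id": 3}, ...], [{"pred": "...", "id": 0}, ...]]; the output is the
# flat list of predictions ordered by example id.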
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        # while/else fires only when the loop condition goes false, i.e. on timeout
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 234 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: DatasetInfo = None, token: str = None, **kwargs):
        """simple docstring"""
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        """simple docstring"""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        """simple docstring"""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        """simple docstring"""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """simple docstring"""
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 660 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    '''simple docstring'''
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    '''simple docstring'''
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
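# The helpers above write the same ARTICLES/SUMMARIES pair to the train/val/test
# splits, giving the dataset tests below a tiny on-disk corpus to load.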
class lowerCamelCase (_A ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCAmelCase_ : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase_ : Union[str, Any] = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
UpperCAmelCase_ : Union[str, Any] = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
UpperCAmelCase_ : int = SeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path='train' , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , )
UpperCAmelCase_ : Dict = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCAmelCase_ : Union[str, Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCAmelCase_ : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase_ : List[Any] = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
UpperCAmelCase_ : List[Any] = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : Any = LegacySeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path='train' , max_source_length=2_0 , max_target_length=UpperCamelCase__ , )
UpperCAmelCase_ : List[Any] = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
UpperCAmelCase_ : Optional[int] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
UpperCAmelCase_ : Any = tmp_dir.joinpath('train.source' ).open().readlines()
UpperCAmelCase_ : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(UpperCamelCase__ , UpperCamelCase__ , 1_2_8 , UpperCamelCase__ )
UpperCAmelCase_ : Dict = {x.name for x in tmp_dir.iterdir()}
UpperCAmelCase_ : Union[str, Any] = {x.name for x in save_dir.iterdir()}
UpperCAmelCase_ : Optional[int] = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(UpperCamelCase__ ) < len(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(UpperCamelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __UpperCAmelCase ( self ) -> List[Any]:
if not FAIRSEQ_AVAILABLE:
return
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self._get_dataset(max_len=6_4 )
UpperCAmelCase_ : Tuple = 6_4
UpperCAmelCase_ : Optional[int] = ds.make_dynamic_sampler(UpperCamelCase__ , required_batch_size_multiple=UpperCamelCase__ )
UpperCAmelCase_ : Any = [len(UpperCamelCase__ ) for x in batch_sampler]
assert len(set(UpperCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(UpperCamelCase__ ) == len(UpperCamelCase__ ) # no dropped or added examples
UpperCAmelCase_ : int = DataLoader(UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Union[str, Any] = []
for batch in data_loader:
UpperCAmelCase_ : str = batch['input_ids'].shape
UpperCAmelCase_ : Optional[int] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCAmelCase_ : Dict = np.product(batch['input_ids'].shape )
num_src_per_batch.append(UpperCamelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(UpperCamelCase__ )
assert num_src_per_batch[0] == max(UpperCamelCase__ )
if failures:
raise AssertionError(f"too many tokens in {len(UpperCamelCase__ )} batches" )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._get_dataset(max_len=5_1_2 )
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : List[Any] = ds.make_sortish_sampler(UpperCamelCase__ , shuffle=UpperCamelCase__ )
UpperCAmelCase_ : Union[str, Any] = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
UpperCAmelCase_ : List[Any] = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCamelCase__ )
UpperCAmelCase_ : str = tokenizer.pad_token_id
def count_pad_tokens(_UpperCamelCase , _UpperCamelCase="input_ids" ):
return [batch[k].eq(UpperCamelCase__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(UpperCamelCase__ , k='labels' ) ) < sum(count_pad_tokens(UpperCamelCase__ , k='labels' ) )
assert sum(count_pad_tokens(UpperCamelCase__ ) ) < sum(count_pad_tokens(UpperCamelCase__ ) )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
def __UpperCAmelCase ( self , _UpperCamelCase=1_0_0_0 , _UpperCamelCase=1_2_8 ) -> int:
if os.getenv('USE_REAL_DATA' , UpperCamelCase__ ):
UpperCAmelCase_ : Optional[Any] = 'examples/seq2seq/wmt_en_ro'
UpperCAmelCase_ : Any = max_len * 2 * 6_4
if not Path(UpperCamelCase__ ).joinpath('train.len' ).exists():
save_len_file(UpperCamelCase__ , UpperCamelCase__ )
else:
UpperCAmelCase_ : Any = 'examples/seq2seq/test_data/wmt_en_ro'
UpperCAmelCase_ : Any = max_len * 4
save_len_file(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCAmelCase_ : List[str] = SeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path='train' , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , n_obs=UpperCamelCase__ , )
return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self._get_dataset()
UpperCAmelCase_ : Optional[int] = set(DistributedSortishSampler(UpperCamelCase__ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=UpperCamelCase__ ) )
UpperCAmelCase_ : List[Any] = set(DistributedSortishSampler(UpperCamelCase__ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=UpperCamelCase__ ) )
assert idsa.intersection(UpperCamelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
if tok_name == MBART_TINY:
UpperCAmelCase_ : Union[str, Any] = SeqaSeqDataset(
UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
UpperCAmelCase_ : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
UpperCAmelCase_ : Dict = SeqaSeqDataset(
UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
UpperCAmelCase_ : Dict = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(UpperCamelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCamelCase__ ) == 0
| 406 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 660 | 0 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
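# Example values: climb_stairs(3) == 3 (1+1+1, 1+2, 2+1) and climb_stairs(4) == 5;
# the function walks the Fibonacci sequence with two rolling variables.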
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
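# Hedged usage sketch (not in the original file): with a torch optimizer `opt`,
#   get_scheduler("cosine", opt, num_warmup_steps=100, num_training_steps=1000)
# returns a LambdaLR whose multiplier warms up linearly for 100 steps and then
# follows a cosine decay toward zero at step 1000.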
| 660 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    '''simple docstring'''
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    # NOTE: the left-hand slice bounds below are reconstructed (assumption); only
    # the right-hand `tensor[:sequence_length]` slices survived in the source.
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
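# Example (assumption about intent): with sequences=[[1, 2]], padding_value=-1,
# padding_side="right", sequence_length=4, the result is [[1, 2, -1, -1]].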
def is_punctuation(char):
    '''simple docstring'''
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """simple docstring"""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here because the labels are not yet of equal length.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 682 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE: the source imported the Flax classes from the TF modeling module;
        # fixed to import from the Flax modeling file.
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 660 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    '''simple docstring'''

    def __init__(self, a=2, b=3, length=64, seed=None):
        '''simple docstring'''
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        '''simple docstring'''
        return self.length

    def __getitem__(self, i):
        '''simple docstring'''
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    '''simple docstring'''

    def __init__(self, a=0, b=0, double_output=False):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        '''simple docstring'''
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    '''simple docstring'''

    def __init__(self, a=0, b=0, double_output=False):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        '''simple docstring'''
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
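# The `first_batch` flag above makes each model print its parameter and input
# dtypes exactly once, which the accelerate tests use to verify mixed-precision
# casting without flooding the logs.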
def get_dataloaders(accelerator, batch_size=16):
    # Function and variable names reconstructed (assumption); note that `batch_size`
    # is not used below — the original test script hardcodes tiny dataloader sizes.
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 363 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 660 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
] ) | 613 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        """simple docstring"""
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """simple docstring"""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """simple docstring"""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """simple docstring"""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """simple docstring"""
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"""but is of type {type(html_strings)}."""
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 660 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class GitVisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 680 | '''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = f"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
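# Example: manhattan_distance([1, 1], [2, 2]) == 2.0; both variants raise
# ValueError when the points have different dimensionality.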
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None) -> Tuple:
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7_600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
        """simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def a ( self : int , a_ : Optional[int]=False , a_ : Union[str, Any]=False )-> Optional[int]:
"""simple docstring"""
def _flatten(a_ : Tuple ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
UpperCAmelCase_ : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase_ : Tuple = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ : Union[str, Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
def a ( self : List[Any] , a_ : int=False , a_ : Union[str, Any]=False )-> int:
"""simple docstring"""
if equal_length:
UpperCAmelCase_ : Tuple = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase_ : Dict = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ : int = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class UpperCAmelCase_ (_A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = SpeechTaFeatureExtractor
def a ( self : Optional[int] )-> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = SpeechTaFeatureExtractionTester(self )
def a ( self : int , a_ : List[str] )-> Optional[Any]:
"""simple docstring"""
self.assertTrue(np.all(np.abs(np.mean(UpperCamelCase__ , axis=0 ) ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
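# Editor's sketch (illustrative; the epsilon value is an assumption): the check
# above mirrors per-utterance zero-mean/unit-variance normalization of the form
# x = np.asarray(speech, dtype=np.float64)
# normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)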
def a ( self : Dict )-> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ : Union[str, Any] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_ : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
UpperCAmelCase_ : Any = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
# Test batched
UpperCAmelCase_ : Union[str, Any] = feat_extract(UpperCamelCase__ , return_tensors="""np""" ).input_values
UpperCAmelCase_ : Optional[int] = feat_extract(UpperCamelCase__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
def a ( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : Dict = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ : List[Any] = ["""longest""", """max_length""", """do_not_pad"""]
UpperCAmelCase_ : str = [None, 16_00, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ : Tuple = feat_extract(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors="""np""" )
UpperCAmelCase_ : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[1][10_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def a ( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : int = range(8_00 , 14_00 , 2_00 )
UpperCAmelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase_ : List[str] = ["""longest""", """max_length""", """do_not_pad"""]
UpperCAmelCase_ : List[Any] = [None, 16_00, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ : Optional[Any] = feat_extract(UpperCamelCase__ , max_length=UpperCamelCase__ , padding=UpperCamelCase__ )
UpperCAmelCase_ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def a ( self : str )-> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ : List[Any] = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10_00 , padding="""max_length""" , return_tensors="""np""" )
UpperCAmelCase_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def a ( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ : Tuple = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10_00 , padding="""longest""" , return_tensors="""np""" )
UpperCAmelCase_ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
UpperCAmelCase_ : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ : Union[str, Any] = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=20_00 , padding="""longest""" , return_tensors="""np""" )
UpperCAmelCase_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def a ( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ : Dict = np.random.rand(1_00 ).astype(np.floataa )
UpperCAmelCase_ : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase_ : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase_ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def a ( self : str )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase_ : Optional[int] = feature_extractor(audio_target=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase_ : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
UpperCAmelCase_ : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
# Test batched
UpperCAmelCase_ : Dict = feature_extractor(UpperCamelCase__ , return_tensors="""np""" ).input_values
UpperCAmelCase_ : Dict = feature_extractor(UpperCamelCase__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_ : Optional[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
UpperCAmelCase_ : Dict = np.asarray(UpperCamelCase__ )
UpperCAmelCase_ : str = feature_extractor(UpperCamelCase__ , return_tensors="""np""" ).input_values
UpperCAmelCase_ : str = feature_extractor(UpperCamelCase__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
def a ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ : str = feat_extract.model_input_names[0]
UpperCAmelCase_ : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCamelCase__ ) == len(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , processed_features[input_name] ) ) )
UpperCAmelCase_ : List[str] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=UpperCamelCase__ )
UpperCAmelCase_ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
UpperCAmelCase_ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase_ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def a ( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=UpperCamelCase__ )
UpperCAmelCase_ : Any = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ : Optional[Any] = feat_extract.model_input_names[0]
UpperCAmelCase_ : int = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
UpperCAmelCase_ : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase_ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def a ( self : Any )-> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_ : List[str] = feat_extract.model_input_names[0]
UpperCAmelCase_ : int = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_ : Tuple = feat_extract.num_mel_bins # hack!
UpperCAmelCase_ : Dict = feat_extract.pad(UpperCamelCase__ , padding="""longest""" , return_tensors="""np""" )[input_name]
UpperCAmelCase_ : Any = feat_extract.pad(UpperCamelCase__ , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def a ( self : Any )-> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.feat_extract_dict
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : int = self.feature_extraction_class(**UpperCamelCase__ )
UpperCAmelCase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_ : Union[str, Any] = [len(UpperCamelCase__ ) for x in speech_inputs]
UpperCAmelCase_ : Dict = feat_extract.model_input_names[0]
UpperCAmelCase_ : Optional[int] = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_ : Dict = feat_extract.num_mel_bins # hack!
UpperCAmelCase_ : int = feat_extract.pad(UpperCamelCase__ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCamelCase__ )
def a ( self : int )-> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.feat_extract_dict
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : List[Any] = self.feature_extraction_class(**UpperCamelCase__ )
UpperCAmelCase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_ : List[str] = [len(UpperCamelCase__ ) for x in speech_inputs]
UpperCAmelCase_ : Dict = feat_extract.model_input_names[0]
UpperCAmelCase_ : Any = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_ : int = min(UpperCamelCase__ )
UpperCAmelCase_ : Optional[int] = feat_extract.num_mel_bins # hack!
UpperCAmelCase_ : Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="""max_length""" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def a ( self : List[Any] , a_ : Dict )-> int:
"""simple docstring"""
from datasets import load_dataset
UpperCAmelCase_ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
UpperCAmelCase_ : Optional[Any] = ds.sort("""id""" ).select(range(UpperCamelCase__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a ( self : Any )-> Dict:
"""simple docstring"""
# fmt: off
UpperCAmelCase_ : str = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
UpperCAmelCase_ : Optional[Any] = self._load_datasamples(1 )
UpperCAmelCase_ : Any = SpeechTaFeatureExtractor()
UpperCAmelCase_ : Union[str, Any] = feature_extractor(UpperCamelCase__ , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] , UpperCamelCase__ , atol=1E-6 ) )
def a ( self : List[str] )-> int:
"""simple docstring"""
# fmt: off
UpperCAmelCase_ : Tuple = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
UpperCAmelCase_ : int = self._load_datasamples(1 )
UpperCAmelCase_ : Dict = SpeechTaFeatureExtractor()
UpperCAmelCase_ : str = feature_extractor(audio_target=UpperCamelCase__ , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 3_66, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , UpperCamelCase__ , atol=1E-4 ) )
| 470 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase__ ( A_ , A_ ):
# save results
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , "config.json" ) ) and os.path.isfile(
os.path.join(A_ , "config.json" ) ):
os.remove(os.path.join(A_ , "config.json" ) )
if os.path.exists(os.path.join(A_ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(A_ , "pytorch_model.bin" ) ):
os.remove(os.path.join(A_ , "pytorch_model.bin" ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def lowerCamelCase__ ( A_ , A_=False ):
UpperCAmelCase_ = 2
if unlogit:
UpperCAmelCase_ = torch.pow(A_ , A_ )
UpperCAmelCase_ = p * torch.log(A_ )
UpperCAmelCase_ = 0
return -plogp.sum(dim=-1 )
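# Editor's worked example (illustrative): the helper above computes the Shannon
# entropy -sum(p * log p) along the last dimension; for a uniform distribution
# over 4 outcomes it gives log(4):
# p = torch.full((4,), 0.25)
# -(p * torch.log(p)).sum(dim=-1) # tensor(1.3863)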
def lowerCamelCase__ ( A_ ):
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
UpperCAmelCase_ , UpperCAmelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
UpperCAmelCase_ = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
# If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0.0
for step, inputs in enumerate(tqdm(A_ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase_) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase_ = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
UpperCAmelCase_ = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase_ = 2
UpperCAmelCase_ = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(A_ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(A_ )
logger.info("Head ranked by importance scores" )
UpperCAmelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( A_ , A_ , A_ ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
UpperCAmelCase_ = 1 / loss # instead of the downstream score, use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , A_ , original_score * args.masking_threshold )
UpperCAmelCase_ = torch.ones_like(A_ )
UpperCAmelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase_ = float("Inf" )
UpperCAmelCase_ = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase_ = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ = new_head_mask.view(-1 )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = new_head_mask.view_as(A_ )
UpperCAmelCase_ = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( A_ , A_ , A_ , A_ ):
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
UpperCAmelCase_ = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , A_ , A_ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , A_ , A_ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(A_ , args.output_dir )
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=A_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=A_ , type=A_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=A_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=A_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=A_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=A_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=A_ , help="Batch size." )
parser.add_argument("--seed" , type=A_ , default=42 )
parser.add_argument("--local_rank" , type=A_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=A_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=A_ , default="" , help="Can be used for distant debugging." )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device("cuda" , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , A_ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ = (torch.from_numpy(A_ ),)
UpperCAmelCase_ = TensorDataset(*A_ )
UpperCAmelCase_ = RandomSampler(A_ )
UpperCAmelCase_ = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
| 660 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )-> List[str]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__UpperCAmelCase = (low + high) // 2
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = max_subarray(A_ , A_ , A_ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = max_subarray(A_ , mid + 1 , A_ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = max_cross_sum(A_ , A_ , A_ , A_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
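# Editor's worked example (values illustrative): on the classic test array the
# divide-and-conquer routine above returns the bounds and sum of the maximum
# subarray:
# max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) # -> (3, 6, 6)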
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )-> List[str]:
__UpperCAmelCase , __UpperCAmelCase = float('-inf' ), -1
__UpperCAmelCase , __UpperCAmelCase = float('-inf' ), -1
__UpperCAmelCase = 0
for i in range(A_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__UpperCAmelCase = summ
__UpperCAmelCase = i
__UpperCAmelCase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__UpperCAmelCase = summ
__UpperCAmelCase = i
return max_left, max_right, (left_sum + right_sum)
def _lowerCAmelCase ( _lowerCAmelCase )-> Union[str, Any]:
__UpperCAmelCase = [randint(1 , A_ ) for _ in range(A_ )]
__UpperCAmelCase = time.time()
max_subarray(A_ , 0 , input_size - 1 )
__UpperCAmelCase = time.time()
return end - start
def _lowerCAmelCase ( )-> Union[str, Any]:
__UpperCAmelCase = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
__UpperCAmelCase = [time_max_subarray(A_ ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(A_ , A_ ):
print(A_ , '\t\t' , A_ )
plt.plot(A_ , A_ )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 126 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case : str = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=A_ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=A_ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=A_ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=A_ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase_ = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase_ = fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(A_ )} examples to process.""" )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = 10_000
UpperCAmelCase_ = time.time()
for text in data:
UpperCAmelCase_ = F"""{bos} {text.strip()} {sep}"""
UpperCAmelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
rslt.append(A_ )
iter += 1
if iter % interval == 0:
UpperCAmelCase_ = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
UpperCAmelCase_ = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(A_ )} examples processed.""" )
UpperCAmelCase_ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
UpperCAmelCase_ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase_ = [np.uintaa(A_ ) for d in rslt]
else:
UpperCAmelCase_ = [np.intaa(A_ ) for d in rslt]
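# Editor's note (illustrative): token ids are stored as 16-bit unsigned integers
# whenever the vocabulary fits in 2**16 = 65536 entries, halving the size of the
# pickled dump; e.g. a ~30k-token BERT vocab fits, so each id costs 2 bytes:
# ids = np.array([101, 7592, 102], dtype=np.uint16) # 6 bytes of payload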
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(A_ , "wb" ) as handle:
pickle.dump(rslt_ , A_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 660 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
UpperCamelCase = ''''''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class _A ( tr.AbstractTransform ):
def __init__( self : Dict , lowerCamelCase__ : str = " " ):
"""simple docstring"""
__UpperCamelCase : Dict = sentence_delimiter
def a ( self : Dict , lowerCamelCase__ : Any ):
"""simple docstring"""
return list(UpperCamelCase__ )
def a ( self : Union[str, Any] , lowerCamelCase__ : List[Any] ):
"""simple docstring"""
__UpperCamelCase : Any = []
for sent_idx, sentence in enumerate(UpperCamelCase__ ):
chars.extend(self.process_string(UpperCamelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCamelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
UpperCamelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
UpperCamelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
UpperCamelCase = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
UpperCamelCase = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates at the character level instead of the word level. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
UpperCamelCase = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def a ( self : List[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def a ( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=False ):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
UpperCamelCase__ , UpperCamelCase__ , truth_transform=UpperCamelCase__ , hypothesis_transform=UpperCamelCase__ , )["wer"]
__UpperCamelCase : Union[str, Any] = 0
__UpperCamelCase : Dict = 0
for prediction, reference in zip(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCamelCase : Union[str, Any] = jiwer.compute_measures(
UpperCamelCase__ , UpperCamelCase__ , truth_transform=UpperCamelCase__ , hypothesis_transform=UpperCamelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
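# Editor's sketch (not part of the metric; helper name illustrative): a
# dependency-free CER can be computed with a plain Levenshtein edit distance
# over characters, divided by the reference length:
# def _levenshtein(ref: str, hyp: str) -> int:
#     prev = list(range(len(hyp) + 1))
#     for i, r in enumerate(ref, 1):
#         curr = [i]
#         for j, h in enumerate(hyp, 1):
#             curr.append(min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + (r != h)))
#         prev = curr
#     return prev[-1]
# _levenshtein("there is another one", "there is an other sample") / len("there is another one")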
| 269 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case : Dict = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case : str = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FSMTForConditionalGeneration.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = F"""facebook/wmt19-{pair}"""
UpperCAmelCase_ = self.get_tokenizer(UpperCamelCase__ )
UpperCAmelCase_ = self.get_model(UpperCamelCase__ )
UpperCAmelCase_ = bleu_data[pair]["src"]
UpperCAmelCase_ = bleu_data[pair]["tgt"]
UpperCAmelCase_ = tokenizer(UpperCamelCase__ , return_tensors="pt" , truncation=UpperCamelCase__ , padding="longest" ).to(UpperCamelCase__ )
UpperCAmelCase_ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase_ = tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
UpperCAmelCase_ = calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertGreaterEqual(scores["bleu"] , UpperCamelCase__ )
| 660 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCAmelCase_ : int = logging.get_logger(__name__)
class lowercase__ ( _A ):
'''simple docstring'''
def __init__( self , *__snake_case , **__snake_case ):
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 533 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__snake_case : List[Any] = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Optional[int] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : int = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Dict = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : Tuple = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : str = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def lowerCamelCase__ ( A_ ):
if isinstance(A_ , A_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=False ):
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=None ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = torch.load(A_ , map_location="cpu" )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint["label_emb.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = unet_config["down_block_types"]
UpperCAmelCase_ = unet_config["layers_per_block"]
UpperCAmelCase_ = unet_config["attention_head_dim"]
UpperCAmelCase_ = unet_config["block_out_channels"]
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(A_ ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = "mid_block.resnets.0"
UpperCAmelCase_ = "middle_block.0"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.attentions.0"
UpperCAmelCase_ = "middle_block.1"
UpperCAmelCase_ = convert_attention(A_ , A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.resnets.1"
UpperCAmelCase_ = "middle_block.2"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config["up_block_types"]
for i, layer_type in enumerate(A_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 660 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__SCREAMING_SNAKE_CASE =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
__SCREAMING_SNAKE_CASE =list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__SCREAMING_SNAKE_CASE =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __magic_name__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"})
SCREAMING_SNAKE_CASE__ : Tuple = field(
default=_A , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
SCREAMING_SNAKE_CASE__ : Dict = field(
default=_A , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
SCREAMING_SNAKE_CASE__ : Tuple = field(default=_A , metadata={"help": "A folder containing the training data."})
SCREAMING_SNAKE_CASE__ : int = field(default=_A , metadata={"help": "A folder containing the validation data."})
SCREAMING_SNAKE_CASE__ : str = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."})
SCREAMING_SNAKE_CASE__ : int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."})
SCREAMING_SNAKE_CASE__ : str = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
SCREAMING_SNAKE_CASE__ : int = field(
default=_A , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE__ : List[Any] = field(
default=_A , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _A ( self: int ):
SCREAMING_SNAKE_CASE_ = {}
if self.train_dir is not None:
SCREAMING_SNAKE_CASE_ = self.train_dir
if self.validation_dir is not None:
SCREAMING_SNAKE_CASE_ = self.validation_dir
SCREAMING_SNAKE_CASE_ = data_files if data_files else None
@dataclass
class __magic_name__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = field(
default=_A , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = field(
default=_A , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_A)} , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = field(
default=_A , metadata={"help": "Pretrained config name or path if not the same as model_name"})
SCREAMING_SNAKE_CASE__ : Dict = field(
default=_A , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = field(
default=_A , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE__ : Any = field(default=_A , metadata={"help": "Name or path of preprocessor config."})
SCREAMING_SNAKE_CASE__ : int = field(
default=_A , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE__ : str = field(
default=_A , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
SCREAMING_SNAKE_CASE__ : str = field(
default=_A , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = field(
default=_A , metadata={"help": "Stride to use for the encoder."} , )
class MaskGenerator:
    '''simple docstring'''
    def __init__( self , input_size=1_92 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('''Input size must be divisible by mask patch size''' )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('''Mask patch size must be divisible by model patch size''' )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
    def __call__( self ):
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
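# Editor's sketch: with the defaults above (input_size=192, mask_patch_size=32,
# model_patch_size=4, mask_ratio=0.6) the generator draws on a 6 x 6 grid of coarse
# patches, masks ceil(36 * 0.6) = 22 of them, then upsamples by scale=8 so the
# returned tensor carries one flag per model patch:
#     mask = MaskGenerator()()
#     assert mask.shape == (2304,)  # (192 // 4) ** 2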
def collate_fn(examples ):
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    mask = torch.stack([example['''mask'''] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mim''' , model_args , data_args )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
    logger.info(F"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"Output directory ({training_args.output_dir}) already exists and is not empty. "
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['''train'''].train_test_split(data_args.train_val_split )
        ds['''train'''] = split['''train''']
        ds['''validation'''] = split['''test''']
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
        if model_args.config_overrides is not None:
            logger.info(F"Overriding config: {model_args.config_overrides}" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"New config: {config}" )
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , '''decoder_type''' ):
        config.decoder_type = '''simmim'''
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            '''image_size''': model_args.image_size,
            '''patch_size''': model_args.patch_size,
            '''encoder_stride''': model_args.encoder_stride,
        } )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelForMaskedImageModeling.from_config(config )
    if training_args.do_train:
        column_names = ds['''train'''].column_names
    else:
        column_names = ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = '''image'''
    elif "img" in column_names:
        image_column_name = '''img'''
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples ):
        examples['''pixel_values'''] = [transforms(image ) for image in examples[image_column_name]]
        examples['''mask'''] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            ds['''train'''] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            ds['''validation'''] = (
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('''train''' , train_result.metrics )
        trainer.save_metrics('''train''' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''masked-image-modeling''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-image-modeling'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
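# Editor's note: a hypothetical invocation for orientation only (the dataset name
# and output path below are placeholders, not part of this script):
#     python run_mim.py --dataset_name cifar10 --model_type vit \
#         --output_dir ./simmim-outputs --do_train --do_eval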
| 234 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['''_TRAINERSPEC''']._serialized_start = 45
    _globals['''_TRAINERSPEC''']._serialized_end = 15_81
    _globals['''_TRAINERSPEC_MODELTYPE''']._serialized_start = 15_17
    _globals['''_TRAINERSPEC_MODELTYPE''']._serialized_end = 15_70
    _globals['''_NORMALIZERSPEC''']._serialized_start = 15_84
    _globals['''_NORMALIZERSPEC''']._serialized_end = 17_93
    _globals['''_SELFTESTDATA''']._serialized_start = 17_95
    _globals['''_SELFTESTDATA''']._serialized_end = 19_16
    _globals['''_SELFTESTDATA_SAMPLE''']._serialized_start = 18_64
    _globals['''_SELFTESTDATA_SAMPLE''']._serialized_end = 19_05
    _globals['''_MODELPROTO''']._serialized_start = 19_19
    _globals['''_MODELPROTO''']._serialized_end = 24_29
    _globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_start = 22_08
    _globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_end = 24_18
    _globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_start = 23_23
    _globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_end = 24_07
# @@protoc_insertion_point(module_scope)
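# Editor's note: the _serialized_start/_serialized_end values restored above are byte
# offsets of each message and enum within the serialized file descriptor; they are
# only consulted on the pure-Python descriptor path guarded by _USE_C_DESCRIPTORS.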
| 660 | 0 |
def longest_distance( graph ):
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
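# Editor's note: a quick sanity check of the call above. The longest chain by vertex
# count in this DAG is 0 -> 2 -> 5 -> 6 -> 7, so the function prints 5; long_dist
# counts vertices rather than edges because every entry starts at 1.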
| 406 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase_ ( unittest.TestCase ):
@slow
    def test_flax_xlm_roberta_base( self ):
        """simple docstring"""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base" )
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )["last_hidden_state"]
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 660 | 0 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible( num ) -> bool:
    """simple docstring"""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution( n = 10 ) -> int:
    """simple docstring"""
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
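# Editor's note: this mirrors Project Euler problem 43. For a 0-9 pandigital number
# d1..d10, the windows d2d3d4 .. d8d9d10 must be divisible by 2, 3, 5, 7, 11, 13, 17
# in turn. The first three checks are shortcuts: d2d3d4 % 2 depends only on d4
# (num[3]), divisibility by 3 only on the digit sum num[2] + num[3] + num[4], and
# d4d5d6 % 5 only on d6 (num[5]).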
if __name__ == "__main__":
print(f'''{solution() = }''')
| 77 | '''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 660 | 0 |
"""simple docstring"""
def get_demo_graph(index ):
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph ):
    '''simple docstring'''
    id_ = 0
    n = len(graph ) # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )
    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
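# Editor's sketch: wiring the two helpers together. In get_demo_graph(0) the triangle
# 0-1-2 and the cycle 5-6-7-8 contain no bridges, so only (2, 3), (3, 4) and (2, 5)
# are reported:
#     print(compute_bridges(get_demo_graph(0)))  # the three edges above, in DFS order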
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ):
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func
    return decorator
def mark_multiple( *keys ):
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        """simple docstring"""
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
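# Editor's sketch of intended usage (the class and key below are illustrative, not
# part of this module): decorate methods with @mark or @mark_multiple, apply
# @register so the metaclass collects them into key_handler, then dispatch a single
# keypress with handle_input():
#     @register
#     class Menu:
#         @mark("q")
#         def quit(cls):
#             return "quit"
#     Menu.handle_input()  # returns "quit" when "q" is pressed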
| 660 | 0 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig (folder_based_builder.FolderBasedBuilderConfig ):
    '''simple docstring'''
    drop_labels = None
    drop_metadata = None
class AudioFolder (folder_based_builder.FolderBasedBuilder ):
    '''simple docstring'''
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio' , label_column='label' )
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
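# Editor's note: assigning EXTENSIONS after the class body keeps the long extension
# list out of the class definition; the attribute only needs to exist by the time a
# builder instance resolves its data files.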
| 363 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester :
    def __init__( self , parent , d_model=1_6 , batch_size=1_3 , prediction_length=7 , context_length=1_4 , label_length=1_0 , cardinality=1_9 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=2_5 , autocorrelation_factor=5 , ):
        """simple docstring"""
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = config.context_length + max(config.lags_sequence )
UpperCAmelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length] )
UpperCAmelCase_ = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
UpperCAmelCase_ = model(**UpperCamelCase__ )
UpperCAmelCase_ = outputs.encoder_last_hidden_state
UpperCAmelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = model.create_network_inputs(**UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
UpperCAmelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
UpperCAmelCase_ = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
UpperCAmelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
UpperCAmelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
    def test_model_main_input_name( self ):
        """simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , "forward" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt" ):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=filename , repo_type="dataset" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
    def test_inference_no_head( self ):
        """simple docstring"""
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
        expected_shape = torch.Size(
            (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
        expected_shape = torch.Size((6_4, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
        expected_shape = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1 ) )
| 660 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings( monkeypatch ):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def mock_hfh( monkeypatch ):
    class MetricMock:
        '''simple docstring'''
        def __init__( self , metric_id ):
            self.id = metric_id
    class HfhMock:
        '''simple docstring'''
        _metrics = [MetricMock(metric_id ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
        def list_metrics( self ):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args ) | 613 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
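# Editor's note: each line of vocab.txt is either a single token or a comma-separated
# group of spellings that share one id (e.g. a hypothetical "こんにちは,コンニチハ" line),
# which is why `vocab` can map several surface forms onto the same index while
# `ids_to_tokens` keeps the whole group for each id.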
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.raw_vocab )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
    def __init__( self , vocab , ids_to_tokens , emoji ):
        """simple docstring"""
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text( self , content ):
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>" , content )
        content = self.content_repatter2.sub("<EMAIL>" , content )
        content = self.content_repatter3.sub("<TEL>" , content )
        content = self.content_repatter4.sub("<DATE>" , content )
        content = self.content_repatter5.sub("<DATE>" , content )
        content = self.content_repatter6.sub("<PRICE>" , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content
    def tokenize( self , text , clean=False ):
        """simple docstring"""
        text = text.replace(" " , "<SP>" )
        text = text.replace("　" , "<SP>" )
        text = text.replace("\r\n" , "<BR>" )
        text = text.replace("\n" , "<BR>" )
        text = text.replace("\r" , "<BR>" )
        text = text.replace("\t" , "<TAB>" )
        text = text.replace("—" , "ー" )
        text = text.replace("−" , "ー" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append("<KIGOU>" )
                elif checkuae(wd ):
                    result.append("<U2000U2BFF>" )
                else:
                    for i in wd.encode("utf-8" ):
                        result.append("<|byte%d|>" % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ):
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word] )
            elif word == "<SP>":
                words.append(" " )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append("\t" )
            elif word == "<BLOCK>":
                words.append("▀" )
            elif word == "<KIGOU>":
                words.append("ǀ" )
            elif word == "<U2000U2BFF>":
                words.append("‖" )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
        text = "".join(words )
        return text
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution( limit = 1E10 ) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
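# Editor's note: this is the Project Euler problem 123 setup. For the n-th prime p_n,
# (p_n - 1)**n + (p_n + 1)**n mod p_n**2 equals 2 * n * p_n when n is odd (and just 2
# when n is even), so the loop skips even n and returns the first odd n whose
# remainder 2 * p_n * n exceeds the limit.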
if __name__ == "__main__":
print(solution())
| 680 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer( self , mname ):
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
    @parameterized.expand(
        [
            ["""en-ru""", 26.0],
            ["""ru-en""", 22.0],
            ["""en-de""", 22.0],
            ["""de-en""", 29.0],
        ] )
    @slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        """simple docstring"""
        mname = f'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]
        batch = tokenizer(src_sentences , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["""bleu"""] , min_bleu_score )
| 470 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
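# A short usage sketch: this builder backs `load_dataset("parquet", ...)`. The call
# below is illustrative (the file path is a placeholder):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
#     print(ds["train"].features)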
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            'CUDAExecutionProvider',
            {
                'gpu_mem_limit': '15000000000',  # 15GB
                'arena_extend_strategy': 'kSameAsRequested',
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png'
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A red cat sitting on a park bench'

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png'
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-inpainting', subfolder='scheduler', revision='onnx'
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            'runwayml/stable-diffusion-inpainting',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A red cat sitting on a park bench'

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
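# The same pipeline can be exercised without a GPU by swapping the execution
# provider; a minimal sketch (assumes the 'onnx' revision of the checkpoint is
# available, and will be much slower than the CUDA provider):
#
#     pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
#         'runwayml/stable-diffusion-inpainting',
#         revision='onnx',
#         provider='CPUExecutionProvider',
#     )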
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        bos_token=None,
        eos_token=None,
        unk_token=None,
        pad_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Removes non-printing characters, normalizes whitespaces and applies NFC unicode normalization."""
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts to token ids directly with the sentencepiece model."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids directly with the sentencepiece model."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds a prompt from a `Conversation`: turns are prefixed with "User:"/"Bot:" and joined with BOS."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
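# A brief usage sketch (model id taken from the pretrained map above; the Swedish
# sample text and the round-trip are illustrative):
#
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tokenizer("Träd är fina", return_tensors="pt").input_ids
#     text = tokenizer.decode(ids[0])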
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
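# Note on the helpers used throughout the tester above: `ids_tensor(shape, vocab_size)`
# (from the common test utilities) returns a random integer tensor with values in
# [0, vocab_size), e.g. a (batch, seq_len) batch of token ids, and
# `random_attention_mask(shape)` returns a random 0/1 mask that guarantees at least
# one attended token per row. The checks then compare output shapes and, for the
# KV-cache test, numerical closeness of cached vs. uncached forward passes.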
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
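# A brief usage sketch outside the test harness (`images` is a placeholder for a
# list of PIL images, numpy arrays or torch tensors):
#
#     from transformers import LevitImageProcessor
#
#     processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#     pixel_values = processor(images, return_tensors="pt").pixel_values  # (batch, 3, 18, 18)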
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark the decorated function as a handler for `key`."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Mark the decorated function as a handler for each key in `keys`."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch it to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild `cls` through the KeyHandler metaclass so its marked handlers are collected."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
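# A minimal registration sketch (class and handler names are illustrative, and it
# assumes KEYMAP defines "up", "down" and "newline" entries as in accelerate's
# menu keymap):
#
#     @register
#     class Menu:
#         current_selection = None
#
#         @mark(KEYMAP["up"])
#         def on_up(cls):
#             return "moved up"
#
#         @mark_multiple(KEYMAP["down"], KEYMAP["newline"])
#         def on_down_or_enter(cls):
#             return "moved down / confirmed"
#
# After registration, `Menu.key_handler` maps key codes to the marked functions,
# and `handle_input` dispatches a single key press to the matching handler.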
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
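# Sanity check sketch: the arc length of a curve y = f(x) is the integral of
# sqrt(1 + f'(x)^2) dx; the loop above approximates it by summing segment lengths
# hypot(dx, dy). For the straight line y = x from 0 to 1 the exact value is
# sqrt(2) ~= 1.41421, which the approximation reproduces to within float error:
#
#     assert abs(line_length(lambda x: x, 0, 1, 1000) - 2 ** 0.5) < 1e-6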
def binary_insertion_sort(collection):
    """Sort `collection` in place using insertion sort, with a binary search to
    locate each element's insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
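# Complexity note: the binary search trims the comparisons to O(log i) per element,
# but the shifting loop is still O(i), so the sort remains O(n^2) time overall
# (with O(n log n) comparisons) and O(1) extra space. Example:
#
#     assert binary_insertion_sort([5, 2, 4, 2]) == [2, 2, 4, 5]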
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
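# A minimal usage sketch (assumes a `DatasetInfo` obtained from the Hub API; the
# repo id and file path are placeholders):
#
#     from huggingface_hub import HfApi
#
#     info = HfApi().dataset_info("user/my-dataset")
#     fs = HfFileSystem(repo_info=info)
#     print(fs.ls(""))                      # top-level files and directories
#     with fs.open("data/train.csv") as f:  # streams the file from the Hub
#         head = f.read(100)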
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
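# Usage sketch: instantiating with defaults falls back to a Swin backbone and a
# DETR decoder, and the two sub-configs can also be supplied explicitly:
#
#     config = MaskFormerConfig()  # Swin backbone + DETR decoder defaults
#     custom = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )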
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
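# How the lazy pattern behaves in practice: importing a symbol from this package
# only materializes the submodule that defines it, so e.g.
#
#     from transformers.models.plbart import PLBartConfig
#
# loads `configuration_plbart` on first attribute access rather than at package
# import time (standard transformers `_LazyModule` behavior).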
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
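# Usage sketch: the config mirrors T5's geometry parameters; e.g. a small custom
# variant (values illustrative):
#
#     config = UMT5Config(vocab_size=32000, d_model=256, num_layers=4, num_heads=4)
#     assert config.hidden_size == config.d_model == 256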
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer, last_epoch=-1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(name, optimizer, step_rules=None, num_warmup_steps=None, num_training_steps=None, num_cycles=1, power=1.0, last_epoch=-1):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
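
# --- Added usage sketch (not part of the original file) ---
# Minimal end-to-end check of `get_scheduler`; the linear schedule ramps the base lr
# from 0 to its full value over `num_warmup_steps`, then decays it to 0.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())  # [0.0005]: still warming up, 5/10 of the base lr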
| 660 | 0 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    '''simple docstring'''
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
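
# --- Added worked example (not part of the original file) ---
# Jump search probes a sorted array in blocks of size ~sqrt(n), then scans linearly
# inside the block, for O(sqrt(n)) comparisons overall. For the array below (n=7,
# block size 2), searching for 13 probes indices 1, 3, 5 and then scans from index 4:
assert jump_search([0, 1, 3, 5, 8, 13, 21], 13) == 5
assert jump_search([0, 1, 3, 5, 8, 13, 21], 4) == -1  # absent value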
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
| 682 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
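
# --- Added usage note (not part of the original file) ---
# With the `_LazyModule` swap above, submodules listed in `_import_structure` are
# imported only on first attribute access, e.g. (assuming the usual package layout):
#   from transformers import Wav2Vec2Config    # loads configuration_wav2vec2 only
#   from transformers import Wav2Vec2ForCTC    # loads modeling_wav2vec2 (needs torch)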
| 660 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

_UpperCamelCase = logging.get_logger(__name__)
logger = _UpperCamelCase

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
    },
    '''tokenizer_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''camembert-base''': 512,
}

SPIECE_UNDERLINE = '''▁'''


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        '''simple docstring'''
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
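
# --- Added usage note (not part of the original file) ---
# Layouts produced by `build_inputs_with_special_tokens` above (CamemBERT follows
# the RoBERTa double-separator convention for sequence pairs):
#   single sequence:    <s> A </s>              ->  cls + A + sep
#   pair of sequences:  <s> A </s></s> B </s>   ->  cls + A + sep + sep + B + sep
# `create_token_type_ids_from_sequences` returns all zeros in both cases.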
| 363 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
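
# --- Added note (not part of the original file) ---
# The v1.1 `user_timeline` endpoint caps history at roughly the most recent 3200
# tweets, so the while-loop above always terminates. A hypothetical equivalent
# using tweepy's pagination helper:
#   for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items():
#       alltweets.append(tweet)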
| 660 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self , lowerCamelCase__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
lowerCAmelCase_: List[str] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(UpperCamelCase__ )
def _a ( self ):
lowerCAmelCase_: Any = "sshleifer/tiny-gpt2"
lowerCAmelCase_: Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: str = TensorFlowBenchmark(UpperCamelCase__ )
lowerCAmelCase_: Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self ):
lowerCAmelCase_: Optional[Any] = "sgugger/tiny-distilbert-classification"
lowerCAmelCase_: Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
lowerCAmelCase_: Dict = TensorFlowBenchmark(UpperCamelCase__ )
lowerCAmelCase_: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self ):
lowerCAmelCase_: Optional[Any] = "sshleifer/tiny-gpt2"
lowerCAmelCase_: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: Tuple = TensorFlowBenchmark(UpperCamelCase__ )
lowerCAmelCase_: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self ):
lowerCAmelCase_: Optional[Any] = "sshleifer/tiny-gpt2"
lowerCAmelCase_: Dict = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCAmelCase_: Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: str = TensorFlowBenchmark(UpperCamelCase__ , [config] )
lowerCAmelCase_: Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self ):
lowerCAmelCase_: List[str] = "sshleifer/tiny-gpt2"
lowerCAmelCase_: Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCAmelCase_: Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: int = TensorFlowBenchmark(UpperCamelCase__ , [config] )
lowerCAmelCase_: Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self ):
lowerCAmelCase_: str = "sshleifer/tiny-gpt2"
lowerCAmelCase_: Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: List[str] = TensorFlowBenchmark(UpperCamelCase__ )
lowerCAmelCase_: Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _a ( self ):
lowerCAmelCase_: Any = "sshleifer/tiny-gpt2"
lowerCAmelCase_: Tuple = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCAmelCase_: Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: Optional[int] = TensorFlowBenchmark(UpperCamelCase__ , [config] )
lowerCAmelCase_: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _a ( self ):
lowerCAmelCase_: Any = "patrickvonplaten/t5-tiny-random"
lowerCAmelCase_: Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCAmelCase_: List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: Union[str, Any] = TensorFlowBenchmark(UpperCamelCase__ , configs=[config] )
lowerCAmelCase_: Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def _a ( self ):
lowerCAmelCase_: Tuple = "sshleifer/tiny-gpt2"
lowerCAmelCase_: List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: Union[str, Any] = TensorFlowBenchmark(UpperCamelCase__ )
lowerCAmelCase_: Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _a ( self ):
lowerCAmelCase_: List[Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_: Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , "inf_mem.csv" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , "env.csv" ) , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: Optional[int] = TensorFlowBenchmark(UpperCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "env.csv" ) ).exists() )
def _a ( self ):
lowerCAmelCase_: Tuple = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(lowerCamelCase__ ):
self.assertTrue(hasattr(UpperCamelCase__ , "sequential" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "cumulative" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "current" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_: Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , "log.txt" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
lowerCAmelCase_: List[Any] = TensorFlowBenchmark(UpperCamelCase__ )
lowerCAmelCase_: Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "log.txt" ) ).exists() ) | 613 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs) -> None:
        """simple docstring"""
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """simple docstring"""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """simple docstring"""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """simple docstring"""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """simple docstring"""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"""but is of type {type(html_strings)}.""" )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
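
# --- Added worked example (not part of the original file; assumes bs4 is installed) ---
# `construct_xpath` emits "[k]" only when the subscript is non-zero, i.e. when the
# tag has same-name siblings; with the hypothetical inputs below:
#   MarkupLMFeatureExtractor().construct_xpath(["html", "body", "div", "p"], [0, 0, 2, 0])
#   -> "/html/body/div[2]/p"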
| 660 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__ : Any = XCLIPTextConfig()
# derive patch size from model name
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_name.find("""patch""" )
SCREAMING_SNAKE_CASE__ : Tuple = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
SCREAMING_SNAKE_CASE__ : int = XCLIPVisionConfig(patch_size=A_ , num_frames=A_ )
if "large" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 768
SCREAMING_SNAKE_CASE__ : Optional[Any] = 3072
SCREAMING_SNAKE_CASE__ : Tuple = 12
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1024
SCREAMING_SNAKE_CASE__ : List[Any] = 4096
SCREAMING_SNAKE_CASE__ : str = 16
SCREAMING_SNAKE_CASE__ : Tuple = 24
SCREAMING_SNAKE_CASE__ : Tuple = 768
SCREAMING_SNAKE_CASE__ : Optional[int] = 3072
if model_name == "xclip-large-patch14-16-frames":
SCREAMING_SNAKE_CASE__ : Dict = 336
SCREAMING_SNAKE_CASE__ : List[str] = XCLIPConfig.from_text_vision_configs(A_ , A_ )
if "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[int] = 768
return config
def _lowercase ( __lowerCAmelCase ) -> str:
# text encoder
if name == "token_embedding.weight":
SCREAMING_SNAKE_CASE__ : List[Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
SCREAMING_SNAKE_CASE__ : Any = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
SCREAMING_SNAKE_CASE__ : Any = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
SCREAMING_SNAKE_CASE__ : int = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
SCREAMING_SNAKE_CASE__ : Any = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
SCREAMING_SNAKE_CASE__ : int = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
SCREAMING_SNAKE_CASE__ : Tuple = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
SCREAMING_SNAKE_CASE__ : Any = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
SCREAMING_SNAKE_CASE__ : str = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
SCREAMING_SNAKE_CASE__ : Tuple = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
SCREAMING_SNAKE_CASE__ : Any = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
SCREAMING_SNAKE_CASE__ : Tuple = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : int = orig_state_dict.pop(A_ )
if "attn.in_proj" in key:
SCREAMING_SNAKE_CASE__ : Tuple = key.split(""".""" )
if key.startswith("""visual""" ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = key_split[3]
SCREAMING_SNAKE_CASE__ : Any = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
SCREAMING_SNAKE_CASE__ : List[str] = val[
:dim, :
]
SCREAMING_SNAKE_CASE__ : List[str] = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : Tuple = val[
-dim:, :
]
else:
SCREAMING_SNAKE_CASE__ : Dict = val[
:dim
]
SCREAMING_SNAKE_CASE__ : List[str] = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Any = val[
-dim:
]
else:
if "weight" in key:
SCREAMING_SNAKE_CASE__ : List[Any] = val[
:dim, :
]
SCREAMING_SNAKE_CASE__ : List[str] = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : Any = val[
-dim:, :
]
else:
SCREAMING_SNAKE_CASE__ : List[str] = val[:dim]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Dict = val[-dim:]
elif key.startswith("""mit""" ):
SCREAMING_SNAKE_CASE__ : int = key_split[2]
SCREAMING_SNAKE_CASE__ : Optional[int] = config.vision_config.mit_hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = val[:dim, :]
SCREAMING_SNAKE_CASE__ : List[str] = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE__ : Dict = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val[:dim]
SCREAMING_SNAKE_CASE__ : Tuple = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ : List[str] = val[-dim:]
else:
SCREAMING_SNAKE_CASE__ : str = key_split[2]
SCREAMING_SNAKE_CASE__ : int = config.text_config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ : List[str] = val[:dim, :]
SCREAMING_SNAKE_CASE__ : List[Any] = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : int = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ : List[Any] = val[:dim]
SCREAMING_SNAKE_CASE__ : Tuple = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Optional[int] = val[-dim:]
else:
SCREAMING_SNAKE_CASE__ : str = rename_key(A_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
SCREAMING_SNAKE_CASE__ : int = val.T
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val
return orig_state_dict
def _lowercase ( __lowerCAmelCase ) -> Optional[Any]:
if num_frames == 8:
SCREAMING_SNAKE_CASE__ : List[Any] = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
SCREAMING_SNAKE_CASE__ : str = """eating_spaghetti.npy"""
elif num_frames == 32:
SCREAMING_SNAKE_CASE__ : str = """eating_spaghetti_32_frames.npy"""
SCREAMING_SNAKE_CASE__ : Tuple = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=A_ , repo_type="""dataset""" , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.load(A_ )
return list(A_ )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = model_to_url[model_name]
SCREAMING_SNAKE_CASE__ : Tuple = 8
if "16-frames" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 16
elif "shot" in model_name:
SCREAMING_SNAKE_CASE__ : Dict = 32
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_xclip_config(A_ , A_ )
SCREAMING_SNAKE_CASE__ : List[Any] = XCLIPModel(A_ )
model.eval()
if "drive" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : str = """pytorch_model.bin"""
gdown.cached_download(A_ , A_ , quiet=A_ )
SCREAMING_SNAKE_CASE__ : int = torch.load(A_ , map_location="""cpu""" )["""model"""]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.hub.load_state_dict_from_url(A_ )["""model"""]
SCREAMING_SNAKE_CASE__ : Dict = convert_state_dict(A_ , A_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = XCLIPModel(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = model.load_state_dict(A_ , strict=A_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
SCREAMING_SNAKE_CASE__ : Optional[int] = VideoMAEImageProcessor(size=A_ )
SCREAMING_SNAKE_CASE__ : int = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
SCREAMING_SNAKE_CASE__ : int = XCLIPProcessor(image_processor=A_ , tokenizer=A_ )
SCREAMING_SNAKE_CASE__ : List[str] = prepare_video(A_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=A_ , return_tensors="""pt""" , padding=A_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**A_ )
# Verify outputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits_per_video
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logits_per_video.softmax(dim=1 )
print("""Probs:""" , A_ )
# kinetics-400
if model_name == "xclip-base-patch32":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
SCREAMING_SNAKE_CASE__ : int = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
SCREAMING_SNAKE_CASE__ : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
SCREAMING_SNAKE_CASE__ : int = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
SCREAMING_SNAKE_CASE__ : int = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(A_ , A_ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(A_ , organization="""nielsr""" )
processor.push_to_hub(A_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(A_ , organization="""nielsr""" )
if __name__ == "__main__":
a :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a :Dict = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
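
# --- Added inference sketch (not part of the original file) ---
# Once converted, a checkpoint can be used for zero-shot video classification; this
# assumes the hub id "microsoft/xclip-base-patch32" and a `video` list of 8 frames:
#   processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
#   model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
#   inputs = processor(text=["eating spaghetti", "playing sports"], videos=video,
#                      return_tensors="pt", padding=True)
#   with torch.no_grad():
#       probs = model(**inputs).logits_per_video.softmax(dim=1)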
| 680 | '''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = f"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
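
# --- Added worked example (not part of the original file) ---
# In 2-D, the Manhattan distance is the sum of per-coordinate absolute differences:
assert manhattan_distance([1, 1], [2, 2]) == 2.0  # |1-2| + |1-2|
assert manhattan_distance_one_liner([1.5, 2], [3, 4]) == 3.5  # 1.5 + 2.0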
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """simple docstring"""
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
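
# --- Added worked example (not part of the original file) ---
# For [8, 3, 2, 7, 4]: _min=2, _max=8, so 7 holes cover the values 2..8; counts land
# in holes_repeat and the values are written back in hole order:
assert pigeon_sort([8, 3, 2, 7, 4]) == [2, 3, 4, 7, 8]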
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 470 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase__ ( A_ , A_ ):
# save results
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , "config.json" ) ) and os.path.isfile(
os.path.join(A_ , "config.json" ) ):
os.remove(os.path.join(A_ , "config.json" ) )
if os.path.exists(os.path.join(A_ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(A_ , "pytorch_model.bin" ) ):
os.remove(os.path.join(A_ , "pytorch_model.bin" ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def lowerCamelCase__ ( A_ , A_=False ):
UpperCAmelCase_ = 2
if unlogit:
UpperCAmelCase_ = torch.pow(A_ , A_ )
UpperCAmelCase_ = p * torch.log(A_ )
UpperCAmelCase_ = 0
return -plogp.sum(dim=-1 )
def lowerCamelCase__ ( A_ ):
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
UpperCAmelCase_ , UpperCAmelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
UpperCAmelCase_ = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0.0
for step, inputs in enumerate(tqdm(A_ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase_) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase_ = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
UpperCAmelCase_ = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase_ = 2
UpperCAmelCase_ = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(A_ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(A_ )
logger.info("Head ranked by importance scores" )
UpperCAmelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( A_ , A_ , A_ ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
UpperCAmelCase_ = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , A_ , original_score * args.masking_threshold )
UpperCAmelCase_ = torch.ones_like(A_ )
UpperCAmelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase_ = float("Inf" )
UpperCAmelCase_ = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase_ = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ = new_head_mask.view(-1 )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = new_head_mask.view_as(A_ )
UpperCAmelCase_ = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( A_ , A_ , A_ , A_ ):
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
UpperCAmelCase_ = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , A_ , A_ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , A_ , A_ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(A_ , args.output_dir )
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=A_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=A_ , type=A_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=A_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=A_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=A_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=A_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=A_ , help="Batch size." )
parser.add_argument("--seed" , type=A_ , default=42 )
parser.add_argument("--local_rank" , type=A_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=A_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=A_ , default="" , help="Can be used for distant debugging." )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device("cuda" , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , A_ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ = (torch.from_numpy(A_ ),)
UpperCAmelCase_ = TensorDataset(*A_ )
UpperCAmelCase_ = RandomSampler(A_ )
UpperCAmelCase_ = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
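
# --- Added sanity note (not part of the original file) ---
# The entropy helper near the top of this file computes -sum(p * ln p) over the last
# dimension, so a uniform attention row over n positions scores ln(n); e.g. for
# p = torch.full((1, 4), 0.25) the result is ln(4) ≈ 1.386. Heads whose rows stay
# near this maximum spread attention uniformly and are the usual pruning candidates.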
| 660 | 0 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , __A , __A = True , __A = None , __A = 32 , __A = True , __A = 1 / 255 , __A = True , __A = True , __A = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __A = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __A = True , __A=7 , __A=30 , __A=400 , __A=3 , ):
__UpperCAmelCase = parent
__UpperCAmelCase = do_resize
__UpperCAmelCase = size if size is not None else {'shortest_edge': 288}
__UpperCAmelCase = size_divisor
__UpperCAmelCase = do_rescale
__UpperCAmelCase = rescale_factor
__UpperCAmelCase = do_normalize
__UpperCAmelCase = do_center_crop
__UpperCAmelCase = image_mean
__UpperCAmelCase = image_std
__UpperCAmelCase = do_pad
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = min_resolution
__UpperCAmelCase = max_resolution
def __lowerCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
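
# --- Illustrative sketch (not part of the original test file) ---
# Direct use of the processor under test; the size kwargs mirror the tester defaults
# above and the input image is synthetic.
if __name__ == "__main__":
    processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
    pixel_values = processor(Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values
    # the shorter edge is resized to 288, then floored to a multiple of size_divisor
    print(pixel_values.shape)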
| 126 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
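
# --- Illustrative sketch (not part of the original script) ---
# Reading back the pickle written by main(); the path follows the dp_file pattern
# above, e.g. "data/dump.bert-base-uncased.pickle".
def load_binarized(dump_path):
    with open(dump_path, "rb") as handle:
        return pickle.load(handle)  # list of numpy integer arrays of token ids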
if __name__ == "__main__":
main()
| 660 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
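    # --- Illustrative follow-up (not part of the original script; the student config
    # below is an assumption) --- the transferred checkpoint initializes a 6-layer
    # DistilBERT student, matching the 6 teacher layers selected above:
    # from transformers import DistilBertConfig, DistilBertForMaskedLM
    # student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
    # student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)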
| 269 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
    def get_tokenizer(self, mname) -> FSMTTokenizer:
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname) -> FSMTForConditionalGeneration:
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score) -> None:
        """simple docstring"""
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 660 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6,
        decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0,
        decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448,
        pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None,
        begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256,
        apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self, preprocessor, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework,
            sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
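
# --- Illustrative sketch (not part of the original module) ---
# `attribute_map` above lets generic code read `hidden_size` and `num_attention_heads`
# even though Whisper stores them as `d_model` and `encoder_attention_heads`:
# config = WhisperConfig(d_model=384, encoder_attention_heads=6)
# assert config.hidden_size == 384 and config.num_attention_heads == 6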
| 533 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
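    # --- Illustrative follow-up (not part of the original script) ---
    # Reload the converted pipeline and draw a one-step sample to sanity-check the
    # conversion; consistency models are designed for single-step generation.
    # pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    # image = pipe(batch_size=1, num_inference_steps=1).images[0]
    # image.save("sample.png")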
| 660 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 234 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
__snake_case : Any = None
__snake_case : Dict = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 660 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    '''simple docstring'''
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            F"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(F"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep='\n')
    else:
        print(F"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
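    # --- Illustrative sketch (not part of the original script) ---
    # Producing a SavedModel to feed into --saved_model_path; any Keras model works,
    # and the checker expects the path of the .pb file inside the export directory:
    # import tensorflow as tf
    # model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
    # tf.saved_model.save(model, "my_saved_model")
    # # then run this script with: --saved_model_path my_saved_model/saved_model.pb --opset 12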
| 406 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self) -> None:
        """simple docstring"""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 660 | 0 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """simple docstring"""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """simple docstring"""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """simple docstring"""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """simple docstring"""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """simple docstring"""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """simple docstring"""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None,
    num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1,
):
    """simple docstring"""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
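
# --- Illustrative sketch (not part of the original module) ---
# Typical wiring of get_scheduler with a torch optimizer; the model and step counts
# are placeholders.
if __name__ == "__main__":
    import torch

    demo_model = torch.nn.Linear(4, 4)
    demo_optimizer = torch.optim.AdamW(demo_model.parameters(), lr=1e-3)
    demo_scheduler = get_scheduler("cosine", demo_optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        demo_optimizer.step()
        demo_scheduler.step()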
| 77 | '''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__snake_case : Dict = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
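    # --- Illustrative usage (not part of the original script; the script name and
    # paths below are placeholders) ---
    #   python convert_gpt2_checkpoint.py \
    #       --gpt2_checkpoint_path /path/to/tf_checkpoint \
    #       --pytorch_dump_folder_path ./gpt2-pytorch \
    #       --gpt2_config_file /path/to/config.json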
| 660 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
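
# --- Illustrative sketch (not part of the original test file) ---
# Running the same pipeline outside unittest on a synthetic waveform; the checkpoint
# is the tiny model used in test_small_model_pt above.
if __name__ == "__main__":
    clf = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    print(clf(np.zeros((16000,), dtype=np.float32), top_k=2))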
| 682 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("""No solution exists!""")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
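    # --- Illustrative sketch (not part of the original module) ---
    # A 4x4 maze: 0 is an open cell, 1 is a wall; solve_maze prints the visited-path grid.
    example_maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    solve_maze(example_maze)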
| 363 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase_ :
    def __init__( self , parent , d_model=1_6 , batch_size=1_3 , prediction_length=7 , context_length=1_4 , label_length=1_0 , cardinality=1_9 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=2_5 , autocorrelation_factor=5 , ) -> Tuple:
        """simple docstring"""
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self ) -> List[Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config ) -> Any:
        """simple docstring"""
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs( self ) -> List[str]:
        """simple docstring"""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ) -> str:
        """simple docstring"""
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict )
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_2 = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a_ = (AutoformerForPrediction,) if is_torch_available() else ()
a_ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
    def setUp( self ) -> List[str]:
        """simple docstring"""
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , "forward" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( A_="train-batch.pt" ):
UpperCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A_ , repo_type="dataset" )
UpperCAmelCase_ = torch.load(A_ , map_location=A_ )
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
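# Hedged end-to-end sketch (added; the model id and batch layout mirror the slow
# tests above via prepare_batch, so only the helper name is an assumption):
def forecast_tourism_monthly():
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
    batch = prepare_batch("val-batch.pt" )
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["static_categorical_features"] ,
            past_time_features=batch["past_time_features"] ,
            past_values=batch["past_values"] ,
            future_time_features=batch["future_time_features"] ,
            past_observed_mask=batch["past_observed_mask"] , )
    # the median over sampled trajectories gives one point forecast per series
    return outputs.sequences.median(dim=1 ).values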
| 660 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavaveca"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wavaveca"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wavaveca"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 613 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__snake_case : Dict = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|startoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , do_clean_text=UpperCamelCase__ , **UpperCamelCase__ , )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase_ = do_clean_text
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_vocab_and_emoji(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
    def vocab_size( self ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
    def get_vocab( self ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(UpperCamelCase__ , clean=self.do_clean_text )
    def _convert_token_to_id( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase__ )
    def convert_tokens_to_string( self , tokens ) -> Optional[int]:
        """simple docstring"""
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
    def __init__( self , vocab , ids_to_tokens , emoji ) -> Optional[int]:
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_transa = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text( self , content ) -> Union[str, Any]:
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>" , content )
        content = self.content_repatter2.sub("<EMAIL>" , content )
        content = self.content_repatter3.sub("<TEL>" , content )
        content = self.content_repatter4.sub("<DATE>" , content )
        content = self.content_repatter5.sub("<DATE>" , content )
        content = self.content_repatter6.sub("<PRICE>" , content )
        content = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content
    def tokenize( self , text , clean=False ) -> List[Any]:
        """simple docstring"""
        text = text.replace(" " , "<SP>" )
        text = text.replace("　" , "<SP>" )
        text = text.replace("\r\n" , "<BR>" )
        text = text.replace("\n" , "<BR>" )
        text = text.replace("\r" , "<BR>" )
        text = text.replace("\t" , "<TAB>" )
        text = text.replace("—" , "ー" )
        text = text.replace("−" , "ー" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
                    or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
                    or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
                    or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
                ):
                    return True
            return False

        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append("<KIGOU>" )
                elif checkuae(wd ):
                    result.append("<U2000U2BFF>" )
                else:
                    for i in wd.encode("utf-8" ):
                        result.append("<|byte%d|>" % i )
                pos = end
        return result
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__="\n" ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(UpperCamelCase__ )
return text
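# Hedged usage sketch (added): upstream this pair of classes ships as
# GPTNeoXJapaneseTokenizer; round-tripping a sentence exercises tokenize() and
# convert_id_to_token() above.
#
#     from transformers import GPTNeoXJapaneseTokenizer
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界")["input_ids"]
#     print(tokenizer.decode(ids))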
| 660 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a :Dict = logging.get_logger(__name__)
a :List[Any] = {'''vocab_file''': '''vocab.txt'''}
a :Optional[int] = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
a :List[Any] = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def load_vocab_file( vocab_file ) -> List[str]:
    with open(vocab_file , """r""" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE :Dict = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE :List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self , _a , _a="<unk>" , _a="<cls>" , _a="<pad>" , _a="<mask>" , _a="<eos>" , **_a , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_vocab_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = dict(enumerate(self.all_tokens ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {tok: ind for ind, tok in enumerate(self.all_tokens )}
SCREAMING_SNAKE_CASE__ : Tuple = unk_token
SCREAMING_SNAKE_CASE__ : List[str] = cls_token
SCREAMING_SNAKE_CASE__ : Dict = pad_token
SCREAMING_SNAKE_CASE__ : List[str] = mask_token
SCREAMING_SNAKE_CASE__ : Dict = eos_token
SCREAMING_SNAKE_CASE__ : Any = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self._id_to_token.get(index , self.unk_token )

    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ) -> Tuple:
        """simple docstring"""
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ) -> Optional[int]:
        """simple docstring"""
        return len(self._id_to_token )
    def get_vocab( self ) -> Any:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self , token ) -> int:
        """simple docstring"""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def id_to_token( self , index ) -> str:
        """simple docstring"""
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask
    def save_vocabulary( self , save_directory , filename_prefix ) -> List[str]:
        """simple docstring"""
        vocab_file = os.path.join(save_directory , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
        with open(vocab_file , """w""" ) as f:
            f.write("""\n""".join(self.all_tokens ) )
        return (vocab_file,)
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self , new_tokens , special_tokens = False ) -> int:
        """simple docstring"""
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
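# Hedged usage sketch (added): upstream this class is EsmTokenizer; every amino
# acid is a single-character token, and build_inputs_with_special_tokens above
# wraps the sequence in <cls> ... <eos>.
#
#     from transformers import EsmTokenizer
#     tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     ids = tokenizer("MKTAYIAKQR")["input_ids"]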
| 680 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def lowerCamelCase__ ( ):
UpperCAmelCase_ = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase_ = g.get_repo("huggingface/diffusers" )
UpperCAmelCase_ = repo.get_issues(state="open" )
for issue in open_issues:
UpperCAmelCase_ = sorted(issue.get_comments() , key=lambda A_ : i.created_at , reverse=A_ )
UpperCAmelCase_ = comments[0] if len(A_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit: int = 100_0000 , n_limit: int = 10 ) -> int:
    """simple docstring"""
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 470 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase_ ( datasets.BuilderConfig ):
a_ = 1_0000
a_ = None
a_ = None
class Parquet( datasets.ArrowBasedBuilder ):
a_ = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
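# Hedged usage sketch (added; not part of the builder module): this builder is
# what backs the packaged "parquet" loader, so reading local files looks like:
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "train.parquet"})
#     print(ds["train"].features)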
| 660 | 0 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors='pt' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria( StoppingCriteria ):
    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string )-> List[Any]:
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs )-> List[str]:
    gen_token_dict = defaultdict(list ) # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main()-> int:
# Setup configuration
__UpperCAmelCase = HfArgumentParser(A_ )
__UpperCAmelCase = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__UpperCAmelCase = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__UpperCAmelCase = 'false'
if args.num_workers is None:
__UpperCAmelCase = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__UpperCAmelCase = Accelerator()
set_seed(args.seed , device_specific=A_ )
# Load model and tokenizer
__UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
__UpperCAmelCase = tokenizer.eos_token
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__UpperCAmelCase = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
}
# Load evaluation dataset and metric
__UpperCAmelCase = load_dataset('openai_humaneval' )
__UpperCAmelCase = load_metric('code_eval' )
__UpperCAmelCase = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
__UpperCAmelCase = args.n_samples // args.batch_size
__UpperCAmelCase = TokenizedDataset(A_ , human_eval['test'] , n_copies=A_ , n_tasks=A_ )
# do not confuse args.batch_size, which is actually the num_return_sequences
__UpperCAmelCase = DataLoader(A_ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__UpperCAmelCase = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`'
' flag to enable code evaluation.' )
raise exception
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(A_ , A_ )
__UpperCAmelCase = complete_code(
A_ , A_ , A_ , A_ , n_tasks=A_ , batch_size=args.batch_size , **A_ , )
if accelerator.is_main_process:
__UpperCAmelCase = []
for task in tqdm(range(A_ ) ):
__UpperCAmelCase = human_eval['test'][task]['test']
__UpperCAmelCase = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
__UpperCAmelCase , __UpperCAmelCase = code_eval_metric.compute(
references=A_ , predictions=A_ , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(A_ , A_ )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 126 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''spiece.model'''}
__snake_case : Dict = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__snake_case : Tuple = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class GPTSw3Tokenizer( PreTrainedTokenizer ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;"
                " if you are testing the model, this can safely be ignored" )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
# Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
    def __getstate__( self ) -> Optional[int]:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> List[str]:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
    def preprocess_text( self , text ) -> str:
        """simple docstring"""
        text = self.non_printing_characters_re.sub("" , text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC" , text )
        return text
    def _tokenize( self , text , **kwargs ) -> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase__ )
    def _convert_id_to_token( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase__ )
@staticmethod
    def clean_up_tokenization( out_string ) -> str:
        """simple docstring"""
        return out_string
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids ) -> str:
        """simple docstring"""
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        all_responses = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(all_responses ) + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt )
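# Hedged usage sketch (added): loading the published checkpoint exercises
# preprocess_text() and the SentencePiece round trip above.
#
#     from transformers import GPTSw3Tokenizer
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]
#     print(tokenizer.decode(ids[0].tolist()))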
| 660 | 0 |
def binary_count_setbits( a: int ) -> int:
    if a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif not isinstance(a , int ):
        raise TypeError("""Input value must be a 'int' type""" )
    return bin(a ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
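# Quick demonstration of the helper above (name as reconstructed in this sketch):
if __name__ == "__main__":
    print(binary_count_setbits(25))  # 0b11001 -> 3
    print(binary_count_setbits(36))  # 0b100100 -> 2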
| 269 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 1_8}
        crop_size = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
a_ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
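# Added smoke-run sketch (hedged; not part of the original test file). Assumes the
# torch and vision extras are installed; the 18x18 crop mirrors the tester defaults.
if is_torch_available() and is_vision_available():
    processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    dummy_image = Image.new("RGB", (32, 48))
    pixel_values = processor(images=dummy_image, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 3, 18, 18)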
| 660 | 0 |
'''simple docstring'''
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
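# Added usage sketch (hedged; not part of the original file). PipelineTool instances
# are callable and chain encode -> forward -> decode. Running this downloads the
# checkpoints, so it is left commented out:
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello world")  # torch.Tensor with the synthesized audio samples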
| 533 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
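    # Added sanity check: for the straight line f(x) = x on [0, 1] every polyline
    # segment lies on the curve, so the result is sqrt(2) up to float error.
    assert abs(line_length(lambda x: x, 0.0, 1.0, 10) - math.sqrt(2)) < 1e-9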
| 660 | 0 |
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # one-indexed (row, column) position of the letter in the square
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
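# Added round-trip check: encoding then decoding reproduces the normalized plaintext
# (lowercase, no spaces, "j" folded into "i").
if __name__ == "__main__":
    cipher = BifidCipher()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"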
| 234 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
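# Added usage sketch (hedged; needs a real hub `DatasetInfo`, so left commented out):
#
#   fs = HfFileSystem(repo_info=dataset_info, token=None)
#   fs.ls("")                  # top-level files and directories from the cached siblings
#   fs.info("data/train.csv")  # hypothetical path inside the repo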
| 660 | 0 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
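    # Added illustration: the heuristic is the Manhattan distance, so a node at (0, 0)
    # aiming for (6, 6) starts with f_cost == |0 - 6| + |0 - 6| == 12.
    assert Node(0, 0, 6, 6, 0, None).f_cost == 12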
| 406 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
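# Added note (hedged): with the `_LazyModule` registration above, heavy submodules are
# imported only on first attribute access, e.g.:
#
#   import transformers.models.plbart as plbart  # cheap: nothing heavy imported yet
#   config = plbart.PLBartConfig()               # first access pulls in configuration_plbart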
| 660 | 0 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit-string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replace curr_string in the lexicon with its two one-bit extensions."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # the code length grows by one bit: left-pad every existing code
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the bit-string with a Lempel-Ziv-Welch style dictionary coder."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed stream with the source file length (Elias-gamma style)."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit-string to the file, padded with a single 1 followed by 0s."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to the destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
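# Added illustration: `compress_data` maps a bit-string to a bit-string; with the
# initial lexicon {"0": "0", "1": "1"} every emitted code is binary as well.
assert set(compress_data("001011")) <= {"0", "1"}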
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 77 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
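if __name__ == "__main__":
    # Added sketch (hedged): exercise the warmup phase of the linear schedule above.
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler(SchedulerType.LINEAR, optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(3):
        optimizer.step()
        scheduler.step()
    print(optimizer.param_groups[0]["lr"])  # 3/10 of the base lr during warmup: 0.0003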
| 660 | 0 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
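def test_hf_hub_url_worked_example():
    # Added worked instance of the pattern above; quote() percent-encodes the space.
    url = hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
    assert url == "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"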
| 682 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 660 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of CLIP image embeddings for (un)scaling."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
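# Added sanity sketch: with the zero-mean / unit-std initialization above, scale
# followed by unscale is the identity up to floating-point error.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    embeds = torch.randn(2, 4)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)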
| 363 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 660 | 0 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
lowerCAmelCase_: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , "embed_dim" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , "num_heads" ) )
class CvtModelTester:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=64 , lowerCamelCase__=3 , lowerCamelCase__=[16, 48, 96] , lowerCamelCase__=[1, 3, 6] , lowerCamelCase__=[1, 2, 10] , lowerCamelCase__=[7, 3, 3] , lowerCamelCase__=[4, 2, 2] , lowerCamelCase__=[2, 1, 1] , lowerCamelCase__=[2, 2, 2] , lowerCamelCase__=[False, False, True] , lowerCamelCase__=[0.0, 0.0, 0.0] , lowerCamelCase__=0.0_2 , lowerCamelCase__=1E-12 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=2 , ):
lowerCAmelCase_: Any = parent
lowerCAmelCase_: Dict = batch_size
lowerCAmelCase_: List[Any] = image_size
lowerCAmelCase_: Union[str, Any] = patch_sizes
lowerCAmelCase_: Optional[int] = patch_stride
lowerCAmelCase_: Any = patch_padding
lowerCAmelCase_: Optional[Any] = is_training
lowerCAmelCase_: Dict = use_labels
lowerCAmelCase_: Tuple = num_labels
lowerCAmelCase_: Any = num_channels
lowerCAmelCase_: List[str] = embed_dim
lowerCAmelCase_: str = num_heads
lowerCAmelCase_: Optional[Any] = stride_kv
lowerCAmelCase_: List[str] = depth
lowerCAmelCase_: List[str] = cls_token
lowerCAmelCase_: Union[str, Any] = attention_drop_rate
lowerCAmelCase_: Dict = initializer_range
lowerCAmelCase_: List[str] = layer_norm_eps
def _a ( self ):
lowerCAmelCase_: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_: List[Any] = None
if self.use_labels:
lowerCAmelCase_: Tuple = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_: List[Any] = self.get_config()
return config, pixel_values, labels
def _a ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: Tuple = CvtModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_: Any = model(UpperCamelCase__ )
lowerCAmelCase_: Tuple = (self.image_size, self.image_size)
lowerCAmelCase_ , lowerCAmelCase_: Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
lowerCAmelCase_: int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowerCAmelCase_: List[str] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: int = self.num_labels
lowerCAmelCase_: Optional[int] = CvtForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_: List[str] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ):
lowerCAmelCase_: Dict = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_: List[str] = config_and_inputs
lowerCAmelCase_: int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE: Tuple = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE: Dict = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE: Optional[int] = False
SCREAMING_SNAKE_CASE: Any = False
SCREAMING_SNAKE_CASE: List[Any] = False
SCREAMING_SNAKE_CASE: Optional[int] = False
SCREAMING_SNAKE_CASE: Optional[int] = False
def _a ( self ):
lowerCAmelCase_: int = CvtModelTester(self )
lowerCAmelCase_: Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _a ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
return
@unittest.skip(reason="Cvt does not output attentions" )
def _a ( self ):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def _a ( self ):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def _a ( self ):
pass
def _a ( self ):
lowerCAmelCase_ , lowerCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_: List[str] = model_class(UpperCamelCase__ )
lowerCAmelCase_: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_: Any = [*signature.parameters.keys()]
lowerCAmelCase_: List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _a ( self ):
lowerCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _a ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase_: List[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_: List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCAmelCase_: Any = outputs.hidden_states
lowerCAmelCase_: Any = len(self.model_tester.depth )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCAmelCase_ , lowerCAmelCase_: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_: List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_: str = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _a ( self ):
lowerCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _a ( self ):
pass
@slow
def _a ( self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_: Dict = CvtModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
lowerCAmelCase_: Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def _a ( self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _a ( self ):
lowerCAmelCase_: List[Any] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase__ )
lowerCAmelCase_: Tuple = self.default_image_processor
lowerCAmelCase_: Optional[Any] = prepare_img()
lowerCAmelCase_: int = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_: List[str] = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_: List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCAmelCase_: str = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) ) | 613 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
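# Added usage sketch (hedged; requires `bs4`, so left commented out):
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello</p></body></html>")
#   encoding["nodes"]   # [["Hello"]]
#   encoding["xpaths"]  # [["/html/body/p"]]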
| 660 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : Any = prediction_length
SCREAMING_SNAKE_CASE__ : Any = context_length
SCREAMING_SNAKE_CASE__ : Dict = cardinality
SCREAMING_SNAKE_CASE__ : str = num_time_features
SCREAMING_SNAKE_CASE__ : List[str] = lags_sequence
SCREAMING_SNAKE_CASE__ : List[str] = embedding_dimension
SCREAMING_SNAKE_CASE__ : List[Any] = is_training
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = context_length
SCREAMING_SNAKE_CASE__ : List[str] = prediction_length + label_length
SCREAMING_SNAKE_CASE__ : int = label_length
SCREAMING_SNAKE_CASE__ : Dict = moving_average
SCREAMING_SNAKE_CASE__ : Union[str, Any] = autocorrelation_factor
def _a ( self ) -> List[Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _a ( self , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = config.context_length + max(config.lags_sequence )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, _past_length] )
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor([self.batch_size, config.prediction_length] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.get_config()
SCREAMING_SNAKE_CASE__ : Tuple = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
SCREAMING_SNAKE_CASE__ : Any = model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.create_network_inputs(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
SCREAMING_SNAKE_CASE__ : int = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
SCREAMING_SNAKE_CASE__ : List[str] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
SCREAMING_SNAKE_CASE__ : Any = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
SCREAMING_SNAKE_CASE__ : str = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE :Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE :List[str] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
_SCREAMING_SNAKE_CASE :Optional[int] = False
_SCREAMING_SNAKE_CASE :Tuple = False
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :Dict = False
_SCREAMING_SNAKE_CASE :str = False
_SCREAMING_SNAKE_CASE :Tuple = False
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = AutoformerModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def _a ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["""missing_keys"""] , [] )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def _a ( self ) -> Any:
"""simple docstring"""
pass
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = inspect.signature(getattr(UpperCamelCase__ , """forward""" ) )
# The main input is the name of the argument after `self`
SCREAMING_SNAKE_CASE__ : List[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : int = getattr(self.model_tester , """seq_length""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(self.model_tester , """decoder_seq_length""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(self.model_tester , """encoder_seq_length""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = getattr(self.model_tester , """d_model""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int = getattr(self.model_tester , """num_attention_heads""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : Dict = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Tuple = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
SCREAMING_SNAKE_CASE__ : Dict = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
SCREAMING_SNAKE_CASE__ : Tuple = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
SCREAMING_SNAKE_CASE__ : str = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _a ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(file="train-batch.pt"):
    batch_file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=file, repo_type="dataset")
    batch = torch.load(batch_file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict = prepare_batch()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
SCREAMING_SNAKE_CASE__ : Any = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1E-1 ) )
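    # Added illustration (not part of the original test file): how the generate() call
    # exercised above can be turned into a point forecast plus a naive MAE. It reuses the
    # exact model checkpoint, prepare_batch() helper and generate() signature from the tests
    # and, like them, assumes network access to the Hub.
    def _example_point_forecast(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        point_forecast = outputs.sequences.mean(dim=1)  # average over num_parallel_samples
        mae = (point_forecast - batch["future_values"]).abs().mean()
        return mae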
| 680 | '''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Compute the Manhattan (L1) distance between two points in n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of manhattan_distance."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
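
# Added usage sketch (illustrative, not in the original module): both implementations
# agree on a simple 2-D example, |1-2| + |1-2| = 2.
def _example_manhattan():
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0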
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
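
# Added note (illustrative, not in the original module): with an all-zero heuristic the
# same search() degenerates to uniform-cost (Dijkstra-style) search and still returns a
# valid minimum-cost path. Self-contained toy example on a 2x2 obstacle-free grid:
def _example_zero_heuristic():
    demo_grid = [[0, 0], [0, 0]]
    zero_heuristic = [[0, 0], [0, 0]]
    path, action_grid = search(demo_grid, [0, 0], [1, 1], 1, zero_heuristic)
    return path  # [[0, 0], [0, 1], [1, 1]]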
| 470 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
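
# Added sanity check (illustrative, not in the original script): the entropy of a uniform
# distribution over 4 outcomes is ln(4) ~= 1.3863, which this helper reproduces.
def _example_entropy():
    p = torch.tensor([0.25, 0.25, 0.25, 0.25])
    return entropy(p)  # tensor(1.3863)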
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and importance scores according to
    Michel et al. (http://arxiv.org/abs/1905.10650)."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) iteratively, keeping the score above a threshold,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and measure speed/quality,
    following Michel et al. (http://arxiv.org/abs/1905.10650)."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
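
# Added illustration (not in the original script): prune_heads() ultimately relies on the
# standard transformers PreTrainedModel.prune_heads() API used above; e.g. removing heads
# 0 and 2 of layer 0 plus head 1 of layer 1 shrinks the parameter count accordingly.
def _example_manual_prune():
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    model.prune_heads({0: [0, 2], 1: [1]})
    return sum(p.numel() for p in model.parameters())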
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written."
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3"
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
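
# Example invocation (added for illustration; the script filename is an assumption,
# the flags are the ones defined in main() above -- adjust paths to your data):
#
#   python run_prunings.py \
#       --model_name_or_path gpt2 \
#       --data_dir data/token_ids.txt \
#       --output_dir output/ \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1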
| 660 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width when providing images to the image processor."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
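
    # Added usage sketch (not part of the test suite): the same preprocessing outside a
    # test, mirroring the calls exercised above; the checkpoint, fixture path and expected
    # shape all come from the tests in this class.
    def _example_preprocess(self):
        image_processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        encoding = image_processor(images=image, return_tensors="pt")
        return encoding["pixel_values"].shape  # torch.Size([1, 3, 800, 1066])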
| 126 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
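
# Example invocation (added for illustration; the script filename is an assumption,
# the flags are the ones defined in main() above):
#
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text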
| 660 | 0 |
def solution(pence: int = 200) -> int:
    """Return the number of different ways `pence` pence can be made from British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
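
# Added sanity check (illustrative): for a 5p target only the 1p, 2p and 5p coins matter,
# and the four decompositions are 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
def _example_small_target():
    assert solution(5) == 4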
| 269 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
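
# Note (added for illustration): these BLEU checks are @slow integration tests; in the
# transformers test suite they only run when slow tests are enabled, e.g.:
#
#   RUN_SLOW=1 pytest tests/models/fsmt -k bleu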
| 660 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
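
# Example invocation (added for illustration; flag names come from the argparse
# definitions above, the paths are placeholders):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path path/to/fairseq.pt \
#       --vocab_path path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts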
| 533 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
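
# Added sanity check (illustrative): str2bool accepts common spellings of yes/no and
# passes booleans through unchanged.
def _example_str2bool():
    assert str2bool("yes") is True
    assert str2bool("0") is False
    assert str2bool(True) is True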
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3, dim=0)
    new_checkpoint[F"""{new_prefix}.group_norm.weight"""] = checkpoint[F"""{old_prefix}.norm.weight"""]
    new_checkpoint[F"""{new_prefix}.group_norm.bias"""] = checkpoint[F"""{old_prefix}.norm.bias"""]
    new_checkpoint[F"""{new_prefix}.to_q.weight"""] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_q.bias"""] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_k.weight"""] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_k.bias"""] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_v.weight"""] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_v.bias"""] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_out.0.weight"""] = (
        checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[F"""{new_prefix}.to_out.0.bias"""] = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1).squeeze(-1)
    return new_checkpoint
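# qkv and proj_out are 1x1 convolutions in the original checkpoint, so their weights carry two
# trailing singleton spatial dims; convert_attention squeezes them to fit diffusers' linear projections.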
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    # walk the original input_blocks sequentially while emitting diffusers down_blocks
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = F"""down_blocks.{i}.resnets.{j}"""
                old_prefix = F"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = F"""down_blocks.{i}.resnets.{j}"""
                old_prefix = F"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = F"""down_blocks.{i}.attentions.{j}"""
                old_prefix = F"""input_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = F"""down_blocks.{i}.downsamplers.0"""
            old_prefix = F"""input_blocks.{current_layer}.0"""
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F"""up_blocks.{i}.resnets.{j}"""
                old_prefix = F"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = F"""up_blocks.{i}.upsamplers.0"""
                old_prefix = F"""output_blocks.{current_layer-1}.1"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F"""up_blocks.{i}.resnets.{j}"""
                old_prefix = F"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = F"""up_blocks.{i}.attentions.{j}"""
                old_prefix = F"""output_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = F"""up_blocks.{i}.upsamplers.0"""
                old_prefix = F"""output_blocks.{current_layer-1}.2"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 660 | 0 |
import random
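# Miller-Rabin: write num - 1 as s * 2**t with s odd, then test 5 random bases;
# a composite slips through all 5 rounds with probability at most 4**-5.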
def rabin_miller(num: int) -> bool:
    # decompose num - 1 into s * 2**t with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
        151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
        317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
        503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
        607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
        701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
        811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
        911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
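# generate_large_prime keeps drawing random keysize-bit integers until one survives both the
# low-prime trial division above and the Miller-Rabin rounds.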
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 234 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_globals['''_TRAINERSPEC''']._serialized_start = 45
_globals['''_TRAINERSPEC''']._serialized_end = 1581
_globals['''_TRAINERSPEC_MODELTYPE''']._serialized_start = 1517
_globals['''_TRAINERSPEC_MODELTYPE''']._serialized_end = 1570
_globals['''_NORMALIZERSPEC''']._serialized_start = 1584
_globals['''_NORMALIZERSPEC''']._serialized_end = 1793
_globals['''_SELFTESTDATA''']._serialized_start = 1795
_globals['''_SELFTESTDATA''']._serialized_end = 1916
_globals['''_SELFTESTDATA_SAMPLE''']._serialized_start = 1864
_globals['''_SELFTESTDATA_SAMPLE''']._serialized_end = 1905
_globals['''_MODELPROTO''']._serialized_start = 1919
_globals['''_MODELPROTO''']._serialized_end = 2429
_globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_start = 2208
_globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_end = 2418
_globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_start = 2323
_globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 660 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    """Sum every perimeter up to ``max_perimeter`` produced by the recurrence below."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
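# (prev_value, value) follows a Pell-like recurrence; each step yields the next candidate
# perimeter, alternating the +2 / -2 correction, and every perimeter within the limit is summed.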
if __name__ == "__main__":
print(F'{solution() = }')
| 406 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase_ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = "The dog is cute and lives in the garden house"
UpperCAmelCase_ = jnp.array([tokenizer.encode(UpperCamelCase__ )] )
UpperCAmelCase_ = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
UpperCAmelCase_ = model(UpperCamelCase__ )["last_hidden_state"]
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase__ , atol=1e-3 ) )
| 660 | 0 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Error raised during checksums verifications of downloaded files."""
class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""
class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
class SplitsVerificationException(Exception):
    """Error raised during splits verifications."""
class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""
class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""
class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits, recorded_splits) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully." )
def get_size_checksum_dict(path, record_checksum: bool = True) -> dict:
    """Compute the file size and (optionally) the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # hash the file in 1 MiB chunks to bound memory use
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 77 | '''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 660 | 0 |
"""simple docstring"""
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that marks edges as visited and records the vertex order."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
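# Return codes follow Euler's theorem: 1 -> every vertex has even degree (Euler circuit exists),
# 2 -> exactly two odd-degree vertices (Euler path starting at odd_node), 3 -> neither.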
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 682 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
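# Both decorators only record which keys a method handles (via its `handle_key` attribute);
# the KeyHandler metaclass below collects those methods into the class-level `key_handler` dict.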
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Rebuild the class with the KeyHandler metaclass applied."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 660 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
| 363 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
UpperCAmelCase_ = model(**UpperCamelCase__ )
UpperCAmelCase_ = outputs.encoder_last_hidden_state
UpperCAmelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = model.create_network_inputs(**UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
UpperCAmelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
UpperCAmelCase_ = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
UpperCAmelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
UpperCAmelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = inspect.signature(getattr(UpperCamelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
UpperCAmelCase_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs)
            correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( A_="train-batch.pt" ):
UpperCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A_ , repo_type="dataset" )
UpperCAmelCase_ = torch.load(A_ , map_location=A_ )
return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
| 660 | 0 |
from manim import *
class Stage(Scene):  # any Scene subclass name works with `manim render`; the original class name is not recoverable
    def construct(self):
lowerCAmelCase_: Optional[int] = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_: Optional[int] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCAmelCase_: Dict = [mem.copy() for i in range(6 )]
lowerCAmelCase_: Dict = [mem.copy() for i in range(6 )]
lowerCAmelCase_: str = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCAmelCase_: List[Any] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCAmelCase_: Any = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCAmelCase_: int = Text("CPU" , font_size=24 )
lowerCAmelCase_: Optional[int] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
lowerCAmelCase_: str = [mem.copy() for i in range(4 )]
lowerCAmelCase_: Union[str, Any] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCAmelCase_: Optional[Any] = Text("GPU" , font_size=24 )
lowerCAmelCase_: str = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase__ )
lowerCAmelCase_: str = [mem.copy() for i in range(6 )]
lowerCAmelCase_: int = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCAmelCase_: List[Any] = Text("Model" , font_size=24 )
lowerCAmelCase_: List[str] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase__ )
lowerCAmelCase_: List[Any] = []
for i, rect in enumerate(UpperCamelCase__ ):
rect.set_stroke(UpperCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCAmelCase_: List[Any] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=UpperCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=UpperCamelCase__ , buff=0.0 )
self.add(UpperCamelCase__ )
cpu_targs.append(UpperCamelCase__ )
lowerCAmelCase_: Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_: Dict = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCAmelCase_: Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
lowerCAmelCase_: int = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , aligned_edge=UpperCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCAmelCase_: Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_: str = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase__ , UpperCamelCase__ )
lowerCAmelCase_: Optional[int] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(UpperCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowerCAmelCase_: Any = MarkupText(
F'''Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ ) , Write(UpperCamelCase__ ) )
self.play(Write(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) )
lowerCAmelCase_: str = []
lowerCAmelCase_: Dict = []
for i, rect in enumerate(UpperCamelCase__ ):
lowerCAmelCase_: Any = fill.copy().set_fill(UpperCamelCase__ , opacity=0.7 )
target.move_to(UpperCamelCase__ )
first_animations.append(GrowFromCenter(UpperCamelCase__ , run_time=1 ) )
lowerCAmelCase_: Optional[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(*UpperCamelCase__ )
self.wait() | 613 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__snake_case : Dict = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
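# Each line of vocab.txt may hold several comma-separated surface forms that share one id:
# `vocab` maps every form to the id, `raw_vocab` keeps the joined line, `ids_to_tokens` the list of forms.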
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|startoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , do_clean_text=UpperCamelCase__ , **UpperCamelCase__ , )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase_ = do_clean_text
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_vocab_and_emoji(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace(" ", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__="\n" ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(UpperCamelCase__ )
return text
| 660 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __a (_A):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = 42
_SCREAMING_SNAKE_CASE :List[Any] = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __a (_A):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Any = 42
_SCREAMING_SNAKE_CASE :List[Any] = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 680 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
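# Illustrative sketch: the thresholds used above, isolated in a hypothetical pure
# function (the real script also re-opens issues when a human replies):
def stale_action(last_comment_is_bot, days_inactive, days_old, exempt):
    if exempt or days_old < 30:
        return "keep"
    if last_comment_is_bot and days_inactive > 7:
        return "close"  # 7 days after the Stalebot notice
    if days_inactive > 23:
        return "notify"  # post the stale warning
    return "keep"

assert stale_action(True, 8, 40, False) == "close"
assert stale_action(False, 30, 40, False) == "notify"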
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func , a , precision = 10**-10 ) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find the root of log(x) - 1 = 0 (i.e. x = e)
print(f"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 470 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig( datasets.BuilderConfig ):
a_ = 1_0000
a_ = None
a_ = None
class lowercase_ ( datasets.ArrowBasedBuilder ):
a_ = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
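# Illustrative sketch: the core of the generator above is pyarrow's batched
# Parquet reader. Standalone, assuming some local "data.parquet" exists:
parquet_file = pq.ParquetFile("data.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
    pa_table = pa.Table.from_batches([record_batch])
    print(batch_idx, pa_table.num_rows)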
| 660 | 0 |
'''simple docstring'''
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
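# Illustrative sketch: loading the artifact back, assuming the script above was
# run and wrote ./tiny-wmt19-en-ru to the working directory:
tok = FSMTTokenizer.from_pretrained("tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-ru")
ids = tok(["Making tiny model"], return_tensors="pt")
print(model(**ids).logits.shape)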
| 126 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
"""simple docstring"""
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
UpperCAmelCase_ = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCAmelCase_ = "<|endoftext|>" if eos_token is None else eos_token
UpperCAmelCase_ = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCAmelCase_ = unk_token if pad_token is None else pad_token
UpperCAmelCase_ = eos_token if bos_token is None else bos_token
else:
UpperCAmelCase_ = "<pad>" if pad_token is None else pad_token
UpperCAmelCase_ = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# Used for whitespace normalization in input texts
        # fmt: off
        UpperCAmelCase_ = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}  # distinct Unicode whitespace code points that mostly render alike here
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        UpperCAmelCase_ = re.compile(
            F"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.non_printing_characters_re.sub("" , UpperCamelCase__ )
# Normalize whitespaces
UpperCAmelCase_ = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
UpperCAmelCase_ = unicodedata.normalize("NFC" , UpperCamelCase__ )
return text
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
        return self.sp_model.encode(text , out_type=str )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__ ) -> str:
"""simple docstring"""
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
UpperCAmelCase_ = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string
def lowerCamelCase_ ( self ) -> Dict[str, int]:
"""simple docstring"""
UpperCAmelCase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
else:
UpperCAmelCase_ = [self.preprocess_text(UpperCamelCase__ ) for t in text]
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
if return_tensors is True or return_tensors == "pt":
UpperCAmelCase_ = torch.tensor(UpperCamelCase__ )
return token_ids
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.decode(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
UpperCAmelCase_ = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(UpperCamelCase__ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=UpperCamelCase__ )
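# Illustrative sketch: the conversation prompt built by the method above, reduced
# to plain string handling with this tokenizer's defaults (eos "<|endoftext|>",
# bos "<s>") and made-up turns:
eos, bos = "<|endoftext|>", "<s>"
turns = [(True, "Hej!"), (False, "Hej! Hur kan jag hjälpa dig?")]
texts = [f"User: {t}" if is_user else f"Bot: {t}" for is_user, t in turns]
prompt = f"{eos}{bos}" + bos.join(texts) + f"{bos}Bot:"
print(prompt)  # <|endoftext|><s>User: Hej!<s>Bot: Hej! Hur kan jag hjälpa dig?<s>Bot: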
| 660 | 0 |
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : int , lowerCamelCase__ : Any ):
"""simple docstring"""
__UpperCamelCase : List[Any] = num_of_nodes
__UpperCamelCase : List[str] = []
__UpperCamelCase : Union[str, Any] = {}
def a ( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int ):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
def a ( self : List[Any] , lowerCamelCase__ : Dict ):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def a ( self : List[Any] , lowerCamelCase__ : Dict ):
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
__UpperCamelCase : Optional[int] = self.find_component(UpperCamelCase__ )
def a ( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] ):
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
__UpperCamelCase : Tuple = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCamelCase__ )
elif component_size[u_node] >= component_size[v_node]:
__UpperCamelCase : Optional[Any] = self.find_component(UpperCamelCase__ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCamelCase__ )
def a ( self : int ):
"""simple docstring"""
__UpperCamelCase : Dict = []
__UpperCamelCase : int = 0
__UpperCamelCase : Optional[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__UpperCamelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] = edge
__UpperCamelCase : str = self.m_component[u]
__UpperCamelCase : int = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__UpperCamelCase : Optional[int] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = edge
__UpperCamelCase : int = self.m_component[u]
__UpperCamelCase : Dict = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
__UpperCamelCase : Dict = [-1] * self.m_num_of_nodes
print(f'The total weight of the minimal spanning tree is: {mst_weight}' )
def __lowerCamelCase ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
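# Illustrative sketch: a compact, runnable version of the Boruvka MST algorithm
# implemented by the class above, using a plain union-find for the components:
def boruvka_mst(num_nodes, edges):  # edges: list of (u, v, weight)
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst_weight, components = 0, num_nodes
    while components > 1:
        cheapest = [None] * num_nodes  # cheapest outgoing edge per component root
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for r in (ru, rv):
                    if cheapest[r] is None or cheapest[r][2] > w:
                        cheapest[r] = (u, v, w)
        for edge in cheapest:
            if edge is not None:
                u, v, w = edge
                if find(u) != find(v):
                    parent[find(u)] = find(v)
                    mst_weight += w
                    components -= 1
    return mst_weight

print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]))  # 6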
| 269 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=1_8 , UpperCamelCase__=3_0 , UpperCamelCase__=4_0_0 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 1_8}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
a_ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
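# Illustrative sketch of why every assertion above expects (N, C, crop_h, crop_w):
# resize-to-shortest-edge keeps the aspect ratio, then a center crop fixes the
# final size. A numpy-only center crop for reference:
def center_crop(img, crop_h, crop_w):  # img: (C, H, W)
    _, h, w = img.shape
    top, left = (h - crop_h) // 2, (w - crop_w) // 2
    return img[:, top : top + crop_h, left : left + crop_w]

print(center_crop(np.zeros((3, 24, 32)), 18, 18).shape)  # (3, 18, 18)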
| 660 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( x ):
    """simple docstring"""
    return x + 2
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = """x = 3"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {}
_SCREAMING_SNAKE_CASE : Tuple = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
assert result == 3
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3} )
_SCREAMING_SNAKE_CASE : Any = """x = y"""
_SCREAMING_SNAKE_CASE : Any = {"""y""": 5}
_SCREAMING_SNAKE_CASE : int = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase__ , {"""x""": 5, """y""": 5} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = """y = add_two(x)"""
_SCREAMING_SNAKE_CASE : List[str] = {"""x""": 3}
_SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(UpperCamelCase__ , {"""add_two""": add_two} , state=UpperCamelCase__ )
assert result == 5
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
_SCREAMING_SNAKE_CASE : List[str] = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = """x = 3"""
_SCREAMING_SNAKE_CASE : List[str] = {}
_SCREAMING_SNAKE_CASE : int = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
assert result == 3
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = """test_dict = {'x': x, 'y': add_two(x)}"""
_SCREAMING_SNAKE_CASE : List[Any] = {"""x""": 3}
_SCREAMING_SNAKE_CASE : Tuple = evaluate(UpperCamelCase__ , {"""add_two""": add_two} , state=UpperCamelCase__ )
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = """x = 3\ny = 5"""
_SCREAMING_SNAKE_CASE : int = {}
_SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """y""": 5} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = """text = f'This is x: {x}.'"""
_SCREAMING_SNAKE_CASE : Dict = {"""x""": 3}
_SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = """if x <= 3:\n y = 2\nelse:\n y = 5"""
_SCREAMING_SNAKE_CASE : Dict = {"""x""": 3}
_SCREAMING_SNAKE_CASE : Any = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """y""": 2} )
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""x""": 8}
_SCREAMING_SNAKE_CASE : List[Any] = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase__ , {"""x""": 8, """y""": 5} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = """test_list = [x, add_two(x)]"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {"""x""": 3}
_SCREAMING_SNAKE_CASE : Tuple = evaluate(UpperCamelCase__ , {"""add_two""": add_two} , state=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [3, 5] )
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = """y = x"""
_SCREAMING_SNAKE_CASE : Tuple = {"""x""": 3}
_SCREAMING_SNAKE_CASE : List[str] = evaluate(UpperCamelCase__ , {} , state=UpperCamelCase__ )
assert result == 3
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """y""": 3} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = """test_list = [x, add_two(x)]\ntest_list[1]"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"""x""": 3}
_SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(UpperCamelCase__ , {"""add_two""": add_two} , state=UpperCamelCase__ )
assert result == 5
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """test_list""": [3, 5]} )
_SCREAMING_SNAKE_CASE : List[str] = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
_SCREAMING_SNAKE_CASE : Tuple = {"""x""": 3}
_SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(UpperCamelCase__ , {"""add_two""": add_two} , state=UpperCamelCase__ )
assert result == 5
self.assertDictEqual(UpperCamelCase__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = """x = 0\nfor i in range(3):\n x = i"""
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : Any = evaluate(UpperCamelCase__ , {"""range""": range} , state=UpperCamelCase__ )
assert result == 2
self.assertDictEqual(UpperCamelCase__ , {"""x""": 2, """i""": 2} )
| 533 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc , x_start , x_end , steps = 100 , ):
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f( x ):
        return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
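    # Illustrative sanity check: for a straight line the piecewise-linear
    # approximation is exact, so f(x) = x from 0 to 1 should give sqrt(2):
    print(line_length(lambda x: x, 0, 1, 10))  # ~1.4142135623730951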
| 660 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class __magic_name__ :
'''simple docstring'''
def _A ( self: List[str] , _lowerCamelCase: int , _lowerCamelCase: Optional[Any] ):
pass
def _A ( self: List[Any] ):
pass
def _A ( self: Dict ):
pass
def _A ( self: Any , _lowerCamelCase: List[str] , _lowerCamelCase: Dict , _lowerCamelCase: List[Any] ):
SCREAMING_SNAKE_CASE_ = np.abs((a - b) ).max()
self.assertLessEqual(UpperCamelCase__ , UpperCamelCase__ , f"Difference between torch and flax is {diff} (>= {tol})." )
def _A ( self: Union[str, Any] , _lowerCamelCase: str , _lowerCamelCase: int , _lowerCamelCase: Dict , _lowerCamelCase: List[str] , _lowerCamelCase: Dict=None , **_lowerCamelCase: Dict ):
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def _A ( self: Optional[Any] , _lowerCamelCase: str , _lowerCamelCase: List[Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: int=None , **_lowerCamelCase: Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _A ( self: Optional[Any] , _lowerCamelCase: Any , _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: List[str] , _lowerCamelCase: str=None , **_lowerCamelCase: List[str] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = after_output[0]
SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1E-3 )
def _A ( self: Tuple , _lowerCamelCase: List[Any] , _lowerCamelCase: Tuple , _lowerCamelCase: Tuple , _lowerCamelCase: List[str] , _lowerCamelCase: int=None , **_lowerCamelCase: Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = model(
input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_attentions=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = output.vision_model_output.attentions
self.assertEqual(len(UpperCamelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ = to_atuple(vision_model.config.image_size )
SCREAMING_SNAKE_CASE_ = to_atuple(vision_model.config.patch_size )
SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
SCREAMING_SNAKE_CASE_ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE_ = output.text_model_output.attentions
self.assertEqual(len(UpperCamelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _A ( self: str , _lowerCamelCase: Optional[int] , _lowerCamelCase: List[str] , _lowerCamelCase: Any ):
pt_model.to(UpperCamelCase__ )
pt_model.eval()
# prepare inputs
SCREAMING_SNAKE_CASE_ = inputs_dict
SCREAMING_SNAKE_CASE_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = pt_model(**UpperCamelCase__ ).to_tuple()
SCREAMING_SNAKE_CASE_ = fx_model(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(UpperCamelCase__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = fx_model_loaded(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(UpperCamelCase__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ , from_flax=UpperCamelCase__ )
pt_model_loaded.to(UpperCamelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = pt_model_loaded(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(UpperCamelCase__ , pt_output_loaded.numpy() , 4E-2 )
def _A ( self: List[str] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Any , _lowerCamelCase: List[Any] ):
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = fx_state
self.check_pt_flax_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _A ( self: Optional[Any] , _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[int] , _lowerCamelCase: List[Any] ):
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = load_flax_weights_in_pytorch_model(UpperCamelCase__ , fx_model.params )
self.check_pt_flax_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _A ( self: Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**UpperCamelCase__ )
def _A ( self: str ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase__ )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_save_load(**UpperCamelCase__ )
def _A ( self: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**UpperCamelCase__ )
@is_pt_flax_cross_test
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('''vision_config''' )
SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('''text_config''' )
SCREAMING_SNAKE_CASE_ = config_inputs_dict
self.check_equivalence_pt_to_flax(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.check_equivalence_flax_to_pt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@slow
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE_ = model_a(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = model_a(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = after_outputs[0]
SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1E-5 )
@require_flax
class __magic_name__ ( _A , unittest.TestCase):
'''simple docstring'''
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=UpperCamelCase__ , text_from_pt=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE_ = 13
SCREAMING_SNAKE_CASE_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _A ( self: List[str] , _lowerCamelCase: Dict , _lowerCamelCase: int ):
SCREAMING_SNAKE_CASE_ = FlaxViTModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(UpperCamelCase__ )
return vision_model, text_model
def _A ( self: List[str] ):
SCREAMING_SNAKE_CASE_ = FlaxViTModelTester(self )
SCREAMING_SNAKE_CASE_ = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE_ = vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __magic_name__ ( _A , unittest.TestCase):
'''simple docstring'''
def _A ( self: Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=UpperCamelCase__ , text_from_pt=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE_ = 13
SCREAMING_SNAKE_CASE_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _A ( self: Dict , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Tuple ):
SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(UpperCamelCase__ )
return vision_model, text_model
def _A ( self: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModelTester(self )
SCREAMING_SNAKE_CASE_ = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE_ = clip_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
@slow
def _A ( self: int ):
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE_ = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = model(**UpperCamelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE_ = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image , UpperCamelCase__ , atol=1E-3 ) )
| 234 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowercase_ ( _A ):
a_ = """"""
a_ = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
"""simple docstring"""
super().__init__(self , **UpperCamelCase__ )
UpperCAmelCase_ = repo_info
UpperCAmelCase_ = token
UpperCAmelCase_ = None
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
if self.dir_cache is None:
UpperCAmelCase_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase_ = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCamelCase__ ): {"name": str(UpperCamelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "rb" , **UpperCamelCase__ , ) -> Optional[int]:
"""simple docstring"""
if not isinstance(self.repo_info , UpperCamelCase__ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
UpperCAmelCase_ = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha )
return fsspec.open(
UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = self._strip_protocol(UpperCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> str:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = PurePosixPath(path.strip("/" ) )
UpperCAmelCase_ = {}
for p, f in self.dir_cache.items():
UpperCAmelCase_ = PurePosixPath(p.strip("/" ) )
UpperCAmelCase_ = p.parent
if root == path:
UpperCAmelCase_ = f
UpperCAmelCase_ = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 0 |
import argparse
import os
import re
__UpperCAmelCase = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
__UpperCAmelCase = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
__UpperCAmelCase = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__UpperCAmelCase = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
__UpperCAmelCase = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__UpperCAmelCase = re.compile(R'\[([^\]]+)\]')
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = _re_indent.search(A_ )
return "" if search is None else search.groups()[0]
def lowercase__ ( __snake_case : Any , __snake_case : Dict="" , __snake_case : int=None , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : List[str] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(A_ ):
index += 1
UpperCAmelCase_ : List[str] = ['\n'.join(lines[:index] )]
else:
UpperCAmelCase_ : Union[str, Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase_ : Dict = [lines[index]]
index += 1
while index < len(A_ ) and (end_prompt is None or not lines[index].startswith(A_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(A_ ) )
if index < len(A_ ) - 1:
UpperCAmelCase_ : int = [lines[index + 1]]
index += 1
else:
UpperCAmelCase_ : Tuple = []
else:
blocks.append('\n'.join(A_ ) )
UpperCAmelCase_ : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A_ ) > 0:
blocks.append('\n'.join(A_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A_ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
def _inner(__snake_case : Tuple ):
return key(A_ ).lower().replace('_' , '' )
return _inner
def sort_objects(objects, key=None):
    """Sort `objects` following isort rules: constants, then classes, then functions."""

    # If no key is provided, use identity.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
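# A small illustration of the ordering: constants first, then classes, then
# functions, each group sorted ignoring case and underscores.
sort_objects(["load_model", "MODEL_LIST", "AutoModel", "convert"])
# -> ['MODEL_LIST', 'AutoModel', 'convert', 'load_model']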
def sort_objects_in_import(import_statement):
    """Sort the names inside one `_import_structure` entry, preserving its layout."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_objects = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_objects]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)
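# An illustrative call on a one-line entry: only the bracketed names are reordered.
sort_objects_in_import('_import_structure["models"] = ["Zeta", "Alpha", "BETA"]')
# -> '_import_structure["models"] = ["BETA", "Alpha", "Zeta"]'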
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of the init `file`; return True if it would change."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__UpperCAmelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
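# Hedged usage note: assuming this script lives at utils/custom_init_isort.py as
# in the diffusers repository and is run from the repo root:
#   python utils/custom_init_isort.py              # rewrite badly sorted __init__.py files in place
#   python utils/custom_init_isort.py --check_only # only report; raises if anything would change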
| 406 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
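# A hedged illustration of what the `_LazyModule` pattern buys: nothing listed in
# `_import_structure` is imported until first attribute access (assumes the
# `transformers` package is installed):
#   import transformers.models.plbart as plbart   # cheap: no torch import yet
#   cfg = plbart.PLBartConfig()                   # first access triggers the real import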
| 660 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class a__ ( _A ):
lowercase_ = "openai-gpt"
lowercase_ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , UpperCamelCase_ : Tuple=40478 , UpperCamelCase_ : int=512 , UpperCamelCase_ : str=768 , UpperCamelCase_ : List[str]=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Dict=1e-5 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : Optional[Any]="cls_index" , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Any=None , UpperCamelCase_ : Any=True , UpperCamelCase_ : Optional[int]=0.1 , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Dict = n_positions
__UpperCAmelCase : Dict = n_embd
__UpperCAmelCase : Optional[Any] = n_layer
__UpperCAmelCase : Union[str, Any] = n_head
__UpperCAmelCase : Optional[Any] = afn
__UpperCAmelCase : Optional[Any] = resid_pdrop
__UpperCAmelCase : str = embd_pdrop
__UpperCAmelCase : Tuple = attn_pdrop
__UpperCAmelCase : List[str] = layer_norm_epsilon
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Any = summary_type
__UpperCAmelCase : Any = summary_use_proj
__UpperCAmelCase : Optional[Any] = summary_activation
__UpperCAmelCase : Any = summary_first_dropout
__UpperCAmelCase : Tuple = summary_proj_to_labels
super().__init__(**UpperCamelCase__)
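# A minimal usage sketch: `attribute_map` above lets the canonical
# PretrainedConfig names resolve to GPT's historical ones.
#   config = OpenAIGPTConfig(n_layer=6)
#   config.num_hidden_layers  # -> 6, aliased to n_layer via attribute_map
#   config.hidden_size        # -> 768, aliased to n_embd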
| 77 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer, last_epoch=-1):
    """Constant learning rate: the multiplier is always 1."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """Linear warmup to the base lr, then constant."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    """Piecewise constant multiplier parsed from a rule string like "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps):
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
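# An illustrative rule string (format: "multiplier:boundary_step,...,final_multiplier"):
# the base lr is multiplied by 1.0 for steps < 10, by 0.1 for steps < 20, and by
# 0.01 from step 20 on. `optimizer` is assumed to be any torch Optimizer.
#   scheduler = get_piecewise_constant_schedule(optimizer, step_rules="1:10,0.1:20,0.01")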
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup followed by a cosine decay over the remaining steps."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
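# With the default num_cycles=0.5, the post-warmup multiplier traces half a
# cosine wave, decaying smoothly from 1.0 to 0.0:
#     multiplier(progress) = 0.5 * (1 + cos(pi * progress)),  progress in [0, 1]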
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    """Linear warmup, then `num_cycles` cosine cycles, each restarting from the base lr."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer's initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
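# A hedged usage sketch (`optimizer` is assumed to be a torch Optimizer): with
# power=1.0 the decay is linear from the base lr down to lr_end; higher powers
# front-load the decay.
#   scheduler = get_polynomial_decay_schedule_with_warmup(
#       optimizer, num_warmup_steps=0, num_training_steps=100, lr_end=1e-7, power=2.0
#   )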
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that builds any of the schedulers above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
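# A minimal end-to-end sketch of `get_scheduler` (assumes torch is installed):
import torch

model = torch.nn.Linear(2, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
for _ in range(1000):
    optimizer.step()   # one training step (loss/backward omitted for brevity)
    scheduler.step()   # advance the lr schedule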
| 660 | 0 |