code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
# Define glider example.
# NOTE(review): was assigned to the mangled name `A_`; the `__main__` block
# below reads it as `GLIDER`, so the real name is restored.
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
# NOTE(review): was also assigned to `A_`, silently clobbering the glider above.
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next generation of Conway's Game of Life.

    Args:
        cells: rectangular 2D grid of 0 (dead) / 1 (alive) cells.

    Returns:
        A new grid of the same shape holding the next generation; the input
        grid is not modified.
    """
    # NOTE(review): every local was assigned to the mangled `_snake_case` while
    # the reads used the real names (`cells`, `neighbour_count`, `alive`, ...),
    # and line 88 below calls this function as `new_generation` — restored.
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count the live neighbours, guarding against the grid borders.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render `frames` successive generations as greyscale PIL images.

    Args:
        cells: starting 2D grid of 0 (dead) / 1 (alive) cells.
        frames: number of generations (and images) to produce.

    Returns:
        One `PIL.Image` per generation: live cells black, dead cells white.
    """
    # NOTE(review): locals were mangled to `_snake_case` and the pixel store
    # (`pixels[x, y] = ...`) was lost; restored from the surviving reads
    # (`img.load()`, `colour`, the `cells[y][x]` indexing) — line 91 calls this
    # function as `generate_images`.
    images = []
    for _ in range(frames):
        # Create output image (one pixel per cell; width = columns, height = rows).
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                # 0 -> white (255), 1 -> black (0).
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image, then advance one generation.
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # Animate 16 generations of the glider and save them as an animated GIF.
    # NOTE(review): the result was assigned to the mangled `A_` although the
    # next line reads `images` — restored.
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 609
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# NOTE(review): both module constants were assigned to the mangled name `A_`;
# the class below reads them as `logger` (warnings) and `arg_to_scheduler`
# (in `_get_lr_scheduler`) — restored.
logger = logging.get_logger(__name__)

# Maps the `--lr_scheduler` CLI value to its schedule factory function.
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase(Trainer):
    """Sequence-to-sequence `Trainer`.

    Adds seq2seq specifics on top of `Trainer`: optional label smoothing,
    pad-token-aware loss computation, sortish/TPU-aware train sampling, a
    configurable LR schedule (see `arg_to_scheduler`) and generation during
    evaluation.

    NOTE(review): this block was machine-mangled — duplicate `a_` parameters
    were a SyntaxError, every method was named `UpperCamelCase_` so later defs
    shadowed earlier ones and no `Trainer` override ever took effect, and the
    base class `__a` was undefined. Names are restored from the surviving
    internal call sites (`self._get_lr_scheduler`, `self._compute_loss`,
    `self._pad_tensors_to_max_len`) and the `Trainer` import at the top of the
    file.
    """

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        """`config` defaults to `self.model.config`; `data_args` carries the
        seq2seq data arguments (e.g. `ignore_pad_token_for_loss`)."""
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        # FSMT keeps separate source/target vocabularies — use the target size for the loss.
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Build the optimizer (Adafactor or AdamW with weight-decay groups)
        and the LR scheduler, unless they were already passed in."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                # NOTE(review): mangled source read `adam_betaa` twice; restored
                # to the standard TrainingArguments fields beta1/beta2.
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                # fairscale's OSS shards optimizer state across data-parallel workers.
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        """Instantiate the schedule named by `--lr_scheduler` (see `arg_to_scheduler`)."""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        """Pick the train sampler: None for iterable datasets, the TPU sampler
        on TPU, otherwise random/distributed (optionally sortish-batched)."""
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        """Return `(loss, logits)`, honoring label smoothing / pad masking."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        """`Trainer` hook: pop the labels and delegate to `_compute_loss`."""
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Evaluation step: optionally generate, compute loss without grad,
        and return `(loss, generated_or_logits, labels)` padded to max length."""
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        """Right-pad a (batch, seq) tensor with the pad token to `max_length`."""
        # If PAD is undefined fall back to EOS; one of them has to exist.
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        # NOTE(review): the mangled source dropped this sliced store, leaving the
        # padded tensor all-pad; restored so the original values are copied in.
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 609
| 1
|
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    """Dummy config used by the auto-registration tests below.

    NOTE(review): name restored from the reads at the `A_ = NewModelConfig`
    line and `NewModelConfig(**tiny_config.to_dict())` in the test class; the
    base `a` was undefined — `BertConfig` is imported at the top of the file.
    """

    # Must match the "new-model" key the tests register with AutoConfig.
    model_type = "new-model"
if is_tf_available():

    class TFNewModel(TFBertModel):
        """Dummy TF model bound to `NewModelConfig` for the registration tests.

        NOTE(review): the mangled source named this class `__A` (colliding with
        the other classes in this file) with undefined base `a` and attribute
        `A_`; `TFBertModel` is imported in the `is_tf_available()` block above,
        and the attribute assigning `NewModelConfig` is the auto-API's
        `config_class` hook.
        """

        config_class = NewModelConfig
@require_tf
class __A ( unittest.TestCase ):
    """Integration tests for the TFAutoModel* auto-factory classes.

    NOTE(review): this block is machine-mangled — every assignment target was
    replaced by `lowercase__` and the original arguments/expected classes by
    the undefined placeholder `_lowerCamelCase`, so these tests cannot run as
    written. Code kept byte-for-byte; comments describe the visible intent.
    """

    # Intent: load an AutoConfig and a base TFAutoModel from "bert-base-cased".
    @slow
    def snake_case_( self )-> Any:
        lowercase__ = '''bert-base-cased'''
        lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
        self.assertIsNotNone(_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        lowercase__ = TFAutoModel.from_pretrained(_lowerCamelCase )
        self.assertIsNotNone(_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: same round trip for TFAutoModelForPreTraining.
    @slow
    def snake_case_( self )-> Optional[int]:
        lowercase__ = '''bert-base-cased'''
        lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
        self.assertIsNotNone(_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowerCamelCase )
        self.assertIsNotNone(_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: causal-LM auto-class over the first GPT-2 checkpoint, with and
    # without `output_loading_info`.
    @slow
    def snake_case_( self )-> Optional[int]:
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowerCamelCase )
            lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowerCamelCase , output_loading_info=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: legacy TFAutoModelWithLMHead over the first BERT checkpoint.
    @slow
    def snake_case_( self )-> int:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: masked-LM auto-class, with and without loading info.
    @slow
    def snake_case_( self )-> Union[str, Any]:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowerCamelCase )
            lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowerCamelCase , output_loading_info=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: seq2seq-LM auto-class over the first T5 checkpoint.
    @slow
    def snake_case_( self )-> Any:
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase )
            lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase , output_loading_info=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: sequence-classification auto-class.
    @slow
    def snake_case_( self )-> str:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: question-answering auto-class.
    @slow
    def snake_case_( self )-> Optional[int]:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: table-QA auto-class (TAPAS); needs tensorflow_probability.
    @slow
    @require_tensorflow_probability
    def snake_case_( self )-> List[str]:
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            lowercase__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
            lowercase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(_lowerCamelCase )
            lowercase__ , lowercase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
                _lowerCamelCase , output_loading_info=_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: a tiny-model identifier loads with the expected parameter count.
    def snake_case_( self )-> List[Any]:
        lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase ) , 1_4_4_1_0 )

    # Intent: same parameter-count check for a second tiny identifier.
    def snake_case_( self )-> Tuple:
        lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase ) , 1_4_4_1_0 )

    # Intent: FunnelConfig maps to two model classes; forcing the architectures
    # field selects TFFunnelBaseModel, and it survives a save/load round trip.
    def snake_case_( self )-> str:
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        lowercase__ = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        lowercase__ = copy.deepcopy(model.config )
        lowercase__ = ['''FunnelBaseModel''']
        lowercase__ = TFAutoModel.from_config(_lowerCamelCase )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_lowerCamelCase )
            lowercase__ = TFAutoModel.from_pretrained(_lowerCamelCase )
            self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )

    # Intent: register a new config/model pair with every TF auto-class,
    # exercise it, then clean the global registries in `finally`.
    def snake_case_( self )-> int:
        try:
            AutoConfig.register('''new-model''' , _lowerCamelCase )
            lowercase__ = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(_lowerCamelCase ):
                        auto_class.register(_lowerCamelCase , _lowerCamelCase )
                    auto_class.register(_lowerCamelCase , _lowerCamelCase )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(_lowerCamelCase ):
                        auto_class.register(_lowerCamelCase , _lowerCamelCase )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    lowercase__ = BertModelTester(self ).get_config()
                    lowercase__ = NewModelConfig(**tiny_config.to_dict() )
                    lowercase__ = auto_class.from_config(_lowerCamelCase )
                    self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(_lowerCamelCase )
                        lowercase__ = auto_class.from_pretrained(_lowerCamelCase )
                        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    # Intent: a bogus repo id raises with a clear "not a valid identifier" message.
    def snake_case_( self )-> int:
        with self.assertRaisesRegex(
            _lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
            lowercase__ = TFAutoModel.from_pretrained('''bert-base''' )

    # Intent: a bogus revision raises with a clear message.
    def snake_case_( self )-> int:
        with self.assertRaisesRegex(
            _lowerCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowercase__ = TFAutoModel.from_pretrained(_lowerCamelCase , revision='''aaaaaa''' )

    # Intent: a repo with a config but no model weights raises "no file named ...".
    def snake_case_( self )-> List[str]:
        with self.assertRaisesRegex(
            _lowerCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    # Intent: a PyTorch-only repo tells the user to pass `from_pt=True`.
    def snake_case_( self )-> int:
        with self.assertRaisesRegex(_lowerCamelCase , '''Use `from_pt=True` to load this model''' ):
            lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    # Intent: a cached checkpoint (plain and sharded) triggers only one HEAD
    # request and no GETs when loaded again.
    def snake_case_( self )-> Union[str, Any]:
        # Make sure we have cached the model.
        lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        lowercase__ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            lowercase__ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 318
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): the path was assigned to the mangled `_lowerCAmelCase` although
# the next line reads `git_repo_path` — restored so the src/ dir is importable.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    """pytest hook: register the repo's custom markers so `-m` filtering works.

    NOTE(review): the hook name was mangled to `_lowerCAmelCase`; the parameter
    name `config` is grounded by the `config.addinivalue_line` calls in the
    body, and this file is a pytest conftest (see header comment).
    """
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    """pytest hook: add transformers' shared CLI options to the parser.

    NOTE(review): hook/parameter names were mangled; restored to the pytest
    hook that matches the `pytest_addoption_shared` delegate.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: optionally emit the per-run reports at the end of a session.

    NOTE(review): the parameter name is grounded by the
    `terminalreporter.config.getoption` read; the result variable is read as
    `make_reports` in the original body's condition.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    """pytest hook: treat "no tests collected" (exit code 5) as success.

    NOTE(review): the `exitstatus == 5` read survives in the mangled source;
    the assignment target was lost and is restored as `session.exitstatus`.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# NOTE(review): both names were mangled to `_lowerCAmelCase`; they are read as
# `IGNORE_RESULT` and `OutputChecker` in the checker class below — restored.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    """Doctest output checker that accepts anything when IGNORE_RESULT is set.

    NOTE(review): class name restored from the read on the following line;
    base `a` and the parameter names were mangled — restored to the
    `doctest.OutputChecker.check_output(self, want, got, optionflags)` API.
    """

    def check_output(self, want, got, optionflags):
        # Short-circuit: with the custom flag, any output is considered correct.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
# NOTE(review): the three assignment targets were mangled to `_lowerCAmelCase`;
# restored to the standard conftest monkey-patches that route doctest through
# the custom checker and the HF doctest module/parser imported at the top.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 318
| 1
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# NOTE(review): the lazy-import bookkeeping was mangled — the structure dict,
# the torch-only extension, and the `sys.modules` swap were all assigned to
# `_lowerCamelCase`. Restored from the `_import_structure` read in the
# `_LazyModule(...)` call and the standard transformers init pattern.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the config/tokenizer modules.
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module constants were assigned to the mangled
# `_lowerCamelCase`; restored to the conventional transformers names
# (`logger`, `MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP`) — confirm against
# the module's callers.
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration for a MobileNetV1 model.

    NOTE(review): the mangled source named every `__init__` parameter
    `UpperCAmelCase__` (a SyntaxError — duplicate argument names) and left the
    base class undefined. Parameter names are restored from the surviving
    reads in the body (`num_channels`, `image_size`, `depth_multiplier`, ...);
    the base is the imported `PretrainedConfig`.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # A non-positive multiplier would collapse every layer to zero channels.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1.

    NOTE(review): the mangled source reused the same class name as the config
    above (shadowing it) with an undefined base; restored to the imported
    `OnnxConfig` and the standard property names of the ONNX export API.
    """

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with a dynamic batch axis.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when comparing ONNX vs. framework outputs.
        return 1e-4
| 87
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _A(TaskTemplate):
    """Text-classification task template: maps dataset columns to text/labels.

    NOTE(review): the decorator referenced the undefined name `snake_case`
    (restored to `frozen=True`, which is why `align_with_features` writes
    through `__dict__`), the base was undefined (restored to the imported
    `TaskTemplate`), and the attribute/method names were mangled — restored
    from the `self.text_column` / `self.label_column` / `self.label_schema`
    reads in the method body.
    """

    # `task` is serialized even when left at its default value.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        own `ClassLabel` feature for the label column."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: bypass the blocked attribute assignment.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset-column -> canonical-column mapping used by `prepare_for_task`."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 315
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
# NOTE(review): the four module constants were assigned to the mangled
# `__lowercase`; three of them are read by name in the tokenizer class below
# (`VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`) — restored.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1_024,
}
class _A ( PreTrainedTokenizerFast ):
    r"""
    "Fast" MVP tokenizer (backed by HuggingFace's *tokenizers* library),
    derived from the GPT-2 byte-level BPE tokenizer.

    The tokenizer treats spaces like parts of the tokens, so a word is encoded
    differently depending on whether it is at the beginning of a sentence.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Sync the backend pre-tokenizer's `add_prefix_space` with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # The pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`;
        # mirror `add_prefix_space`/`trim_offsets` into the post-processor as well.
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_component = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_component)

    @property
    def mask_token(self) -> str:
        """The mask token; logs an error and returns None if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word: it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend model files; returns the tuple of written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`<s> A </s>` for one sequence, `<s> A </s> </s> B </s>` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """MVP does not use token type ids; return an all-zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 315
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
# Map of checkpoint identifier -> URL of its hosted config.json.
# NOTE(review): both module constants are bound to the same name
# `__UpperCamelCase`, so the logger above is immediately overwritten by the
# dict below — looks like an obfuscation artifact; confirm intended names
# (presumably `logger` and an archive-map constant).
__UpperCamelCase : Optional[int] = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE( PretrainedConfig ):
    """Configuration for a TimeSformer video model.

    Stores the vision-transformer geometry (image/patch size, channels, number
    of frames, depth/width) plus TimeSformer-specific options such as the
    space-time attention scheme and the stochastic-depth rate.
    """

    # Key under which this config registers itself in the Auto* mappings.
    model_type = "timesformer"

    def __init__(
        self,
        image_size=2_24,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        """Build the config; extra **kwargs are forwarded to PretrainedConfig."""
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        # "divided_space_time" is the default TimeSformer attention factorization.
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 328
|
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_path, dest_path, n):
    """Copy every file in *src_path* into *dest_path*, truncated to its first *n* lines.

    Args:
        src_path: directory containing the files to shorten.
        dest_path: output directory (created if missing); file names are preserved.
        n: number of leading lines to keep from each file.
    """
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        # rstrip each kept line so the shortened copy has uniform line endings
        new_lines = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_file = dest_dir.joinpath(path.name)
        print(dest_file)
        # joined with "\n": the output deliberately has no trailing newline
        dest_file.open('w').write('\n'.join(new_lines))


if __name__ == "__main__":
    fire.Fire(minify)
| 195
| 0
|
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return the 1-based position of the most significant set bit of a
    non-negative integer (equivalently, its bit length); 0 maps to 0.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative (the shift loop would never
            terminate for negative values, whose sign bit is preserved).
    """
    if not isinstance(SCREAMING_SNAKE_CASE__, int):
        raise TypeError('Input value must be an \'int\' type')
    if SCREAMING_SNAKE_CASE__ < 0:
        raise ValueError('Input value must be a non-negative \'int\'')
    number = SCREAMING_SNAKE_CASE__
    position = 0
    # Shift right until the number is exhausted; each shift is one bit position.
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 230
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
# Backwards-compatibility shim: re-export the pipeline from its new home and
# warn anyone importing it through this legacy module path.
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401
# Emits a deprecation warning (removal slated for 0.22.0) pointing callers at
# the top-level `from diffusers import FlaxStableDiffusionControlNetPipeline`.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 230
| 1
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    """Builds tiny TFDebertaV2 configs/inputs and runs shape checks for each task head.

    NOTE(review): the original class was named ``__lowerCAmelCase`` (colliding
    with the two test classes below) while the test suite instantiates
    ``TFDebertaVaModelTester`` — restored to the referenced name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a tiny config plus random ids/masks/labels for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model accepts both dict and list inputs; hidden state shape checked."""
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        sequence_input = [input_ids, input_mask]
        result = model(sequence_input)
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head emits per-token vocabulary logits."""
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head emits one logit vector per example."""
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head emits per-token label logits."""
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head emits per-token start and end logits."""
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Repackage inputs into the (config, inputs_dict) shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model/pipeline test-suite entry point for the TF DeBERTa-v2 family."""

    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against a real pretrained checkpoint."""

    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        # Expected 3x3 slice of the final hidden states for this fixed input.
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 291
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)

# All config classes that provide a masked-LM head, and the model-type strings
# accepted by --model_type when training a model from scratch.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when the config is built from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and evaluation."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Only csv/json/txt inputs are supported by `load_dataset` below.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference data to *dataset* as a `chinese_ref` column.

    Args:
        dataset: a `datasets.Dataset` whose rows align 1:1 with the lines of *ref_file*.
        ref_file: path to a file with one JSON-encoded reference entry per non-empty line.

    Returns:
        A new `Dataset` with all the original columns plus `chinese_ref`.
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    # Every example must have exactly one reference entry.
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    # NOTE(review): `chinese_ref` is the column name DataCollatorForWholeWordMask
    # consumes for whole-word masking references — confirm against the collator in use.
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    """Train and/or evaluate a masked-LM with whole-word masking; returns eval metrics."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # No validation split provided: carve one out of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    # Resize in case the tokenizer added tokens beyond the config's vocab size.
    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def __lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
    """xla_spawn (TPU) entry point: each spawned process receives its process
    index as the argument, ignores it, and simply runs ``main()``."""
    # For xla_spawn (TPUs)
    main()
# Standard script entry point when run directly (non-TPU path).
if __name__ == "__main__":
    main()
| 272
| 0
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
_lowerCamelCase : Any = True
from torch.cuda.amp import autocast
_lowerCamelCase : List[str] = logging.getLogger(__name__)
def A__ ( __A : Any=None , __A : Any=None ) ->Optional[int]:
return field(default_factory=lambda: default , metadata=__A )
@dataclass
class lowerCAmelCase__ :
    """Arguments pertaining to which model/config we are going to fine-tune.

    NOTE(review): this class looks mangled — every attribute is bound to the
    same name ``lowercase_`` and carries no type annotation, so ``@dataclass``
    will not register any of these ``field(...)`` values as real fields.
    Confirm against the original training script.
    """
    # Path to pretrained model or model identifier from huggingface.co/models (required).
    lowercase_ = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    # Cache directory for models downloaded from huggingface.co.
    lowercase_ = field(
        default=__magic_name__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    # Whether to freeze the feature extractor layers of the model.
    lowercase_ = field(
        default=__magic_name__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
    # Dropout ratio for the attention probabilities.
    lowercase_ = field(
        default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
    # Dropout ratio for activations inside the fully connected layer.
    lowercase_ = field(
        default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
    # Dropout for all fully connected layers in embeddings, encoder and pooler.
    lowercase_ = field(
        default=0.1 , metadata={
            """help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
        } , )
    # Dropout for the 1D convolutional layers of the feature extractor.
    lowercase_ = field(
        default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , )
    # SpecAugment: probability of a feature vector being chosen as a mask-span start.
    lowercase_ = field(
        default=0.05 , metadata={
            """help""": (
                """Propability of each feature vector along the time axis to be chosen as the start of the vector"""
                """span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
                """vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
            )
        } , )
    # LayerDrop probability.
    lowercase_ = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class lowerCAmelCase__ :
    """Arguments pertaining to what data we are going to input for training and eval.

    NOTE(review): like the model arguments above, every attribute here is bound
    to the mangled name ``lowercase_`` without a type annotation, so these are
    not real dataclass fields — confirm against the original script.
    """
    # Configuration name of the dataset to use (via the datasets library).
    lowercase_ = field(
        default=__magic_name__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    # Name of the training split to use.
    lowercase_ = field(
        default="""train+validation""" , metadata={
            """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
        } , )
    # Whether to overwrite cached preprocessed datasets.
    lowercase_ = field(
        default=__magic_name__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    # Number of processes used for preprocessing.
    lowercase_ = field(
        default=__magic_name__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    # Debugging: truncate number of training examples.
    lowercase_ = field(
        default=__magic_name__ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    # Debugging: truncate number of validation examples.
    lowercase_ = field(
        default=__magic_name__ , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of validation examples to this """
                """value if set."""
            )
        } , )
    # Characters removed from transcripts before building the CTC vocabulary.
    lowercase_ = list_field(
        default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class lowerCAmelCase__ :
    """Data collator that dynamically pads Wav2Vec2 input features and CTC labels.

    Inputs and labels are padded independently (they have different lengths),
    and label padding positions are replaced by ``-100`` so the loss ignores them.

    NOTE(review): the six attribute declarations below are obfuscation residue
    (all bound to ``lowercase_``); upstream they are
    ``processor``, ``padding``, ``max_length``, ``max_length_labels``,
    ``pad_to_multiple_of`` and ``pad_to_multiple_of_labels`` — confirm.
    """
    lowercase_ = 42
    lowercase_ = True
    lowercase_ = None
    lowercase_ = None
    lowercase_ = None
    lowercase_ = None
    def __call__( self , features ):
        """Collate a list of feature dicts into a padded batch tensor dict.

        The original body assigned every intermediate to ``__A`` while reading
        the real names (``features``, ``batch``, ``labels_batch``), which made
        it raise ``NameError``; the intended data flow is restored here.
        """
        # Split inputs and labels since they have to be padded with different lengths.
        input_features = [{'''input_values''': feature['''input_values''']} for feature in features]
        label_features = [{'''input_ids''': feature['''labels''']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 )
        batch['''labels'''] = labels
        return batch
class lowerCAmelCase__ ( __magic_name__ ):
    """Trainer subclass whose training step scales the CTC loss correctly under
    multi-GPU, native AMP, Apex AMP, and DeepSpeed setups."""
    def __UpperCamelCase ( self , model , inputs ):
        """Run one training step on ``inputs`` and return the detached loss.

        The original signature declared ``lowercase__`` twice (a duplicate
        parameter name, i.e. a SyntaxError) and every local assignment was
        mangled to ``__A``; the conventional
        ``training_step(self, model, inputs)`` body is restored here.
        """
        model.train()
        inputs = self._prepare_inputs(inputs )
        # Forward pass, under autocast when native AMP is enabled.
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        # Under DataParallel the per-device losses must be reduced according to
        # the model's configured CTC loss reduction.
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''labels'''] >= 0).sum()
            else:
                raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        # Backward pass through whichever mixed-precision/offload engine is active.
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
def A__ ( ) ->int:
    """Fine-tune Wav2Vec2 for CTC on the Common Voice dataset and optionally evaluate.

    NOTE(review): local names in this function appear mangled — every
    assignment targets ``__A`` while later reads use the original names
    (``parser``, ``training_args``, ``train_dataset``, ...), so the function is
    not runnable as-is. The documentation below describes the evident intent;
    confirm against the upstream ``run_common_voice.py`` script.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __A =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __A , __A , __A =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __A , __A , __A =parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    __A =None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __A =get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , __A )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets:
    __A =datasets.load_dataset(
        '''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
    __A =datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
    # Create and save tokenizer
    __A =F'''[{''.join(data_args.chars_to_ignore )}]'''
    def remove_special_characters(__A : Optional[int] ):
        """Strip ignored characters, lower-case the sentence and append a space."""
        __A =re.sub(__A , '''''' , batch['''sentence'''] ).lower() + ''' '''
        return batch
    __A =train_dataset.map(__A , remove_columns=['''sentence'''] )
    __A =eval_dataset.map(__A , remove_columns=['''sentence'''] )
    def extract_all_chars(__A : Union[str, Any] ):
        """Collect the set of characters appearing in a batch of texts."""
        __A =''' '''.join(batch['''text'''] )
        __A =list(set(__A ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    # NOTE(review): both vocabulary extractions below map over ``train_dataset``
    # — the second one presumably should map over ``eval_dataset``; confirm
    # against the upstream script.
    __A =train_dataset.map(
        __A , batched=__A , batch_size=-1 , keep_in_memory=__A , remove_columns=train_dataset.column_names , )
    __A =train_dataset.map(
        __A , batched=__A , batch_size=-1 , keep_in_memory=__A , remove_columns=eval_dataset.column_names , )
    __A =list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
    __A ={v: k for k, v in enumerate(__A )}
    __A =vocab_dict[''' ''']
    del vocab_dict[" "]
    __A =len(__A )
    __A =len(__A )
    with open('''vocab.json''' , '''w''' ) as vocab_file:
        json.dump(__A , __A )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __A =WavaVecaCTCTokenizer(
        '''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
    __A =WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=__A , return_attention_mask=__A )
    __A =WavaVecaProcessor(feature_extractor=__A , tokenizer=__A )
    __A =WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
    if data_args.max_train_samples is not None:
        __A =min(len(__A ) , data_args.max_train_samples )
        __A =train_dataset.select(range(__A ) )
    if data_args.max_val_samples is not None:
        __A =eval_dataset.select(range(data_args.max_val_samples ) )
    __A =torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(__A : Union[str, Any] ):
        """Load one audio file and resample it from 48kHz to 16kHz."""
        __A , __A =torchaudio.load(batch['''path'''] )
        __A =resampler(__A ).squeeze().numpy()
        __A =1_60_00
        __A =batch['''text''']
        return batch
    __A =train_dataset.map(
        __A , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    __A =eval_dataset.map(
        __A , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(__A : Optional[Any] ):
        """Turn raw speech + target text into model inputs via the processor."""
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['''sampling_rate'''] ) ) == 1
        ), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
        __A =processor(
            audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
        batch.update(__A )
        return batch
    __A =train_dataset.map(
        __A , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__A , num_proc=data_args.preprocessing_num_workers , )
    __A =eval_dataset.map(
        __A , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__A , num_proc=data_args.preprocessing_num_workers , )
    # Metric
    __A =datasets.load_metric('''wer''' )
    def compute_metrics(__A : int ):
        """Greedy-decode predictions and compute word error rate."""
        __A =pred.predictions
        __A =np.argmax(__A , axis=-1 )
        __A =processor.tokenizer.pad_token_id
        __A =processor.batch_decode(__A )
        # we do not want to group tokens when computing the metrics
        __A =processor.batch_decode(pred.label_ids , group_tokens=__A )
        __A =wer_metric.compute(predictions=__A , references=__A )
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    __A =DataCollatorCTCWithPadding(processor=__A , padding=__A )
    # Initialize our Trainer
    __A =CTCTrainer(
        model=__A , data_collator=__A , args=__A , compute_metrics=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            __A =last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            __A =model_args.model_name_or_path
        else:
            __A =None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        __A =trainer.train(resume_from_checkpoint=__A )
        trainer.save_model()
        __A =train_result.metrics
        __A =(
            data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
        )
        __A =min(__A , len(__A ) )
        trainer.log_metrics('''train''' , __A )
        trainer.save_metrics('''train''' , __A )
        trainer.save_state()
    # Evaluation
    __A ={}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        __A =trainer.evaluate()
        __A =data_args.max_val_samples if data_args.max_val_samples is not None else len(__A )
        __A =min(__A , len(__A ) )
        trainer.log_metrics('''eval''' , __A )
        trainer.save_metrics('''eval''' , __A )
    return results
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under this name in this file — the
    # entry point above is mangled to ``A__``; confirm against the original script.
    main()
| 516
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( __magic_name__ ):
    """Deprecated alias of ``DPTImageProcessor`` kept for backward compatibility.

    Instantiating it emits a ``FutureWarning``; all behavior is inherited
    unchanged from the base image processor.
    """
    def __init__( self , *args , **kwargs ):
        # The original declared ``*lowercase__, **lowercase__`` (a duplicate
        # parameter name, i.e. a SyntaxError) and passed the argument tuple
        # where the warning category belongs; restore ``FutureWarning``.
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 516
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
    """Slow integration test comparing google/mt5-small's LM loss on a tiny
    fixed example against a reference score."""
    @slow
    def A_ ( self : Optional[int] ):
        """Check the sequence log-likelihood of mT5-small within 1e-4 tolerance.

        NOTE(review): every local assignment below targets the mangled name
        ``SCREAMING_SNAKE_CASE__`` while later reads use the original names
        (``tokenizer``, ``model``, ``labels``, ``mtf_score``) — the body is not
        runnable as-is; confirm against the upstream test.
        """
        SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=UpperCAmelCase_ ).to(UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('google/mt5-small' )
        SCREAMING_SNAKE_CASE__ = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        SCREAMING_SNAKE_CASE__ = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        SCREAMING_SNAKE_CASE__ = model(input_ids.to(UpperCAmelCase_ ) , labels=labels.to(UpperCAmelCase_ ) ).loss
        SCREAMING_SNAKE_CASE__ = -(labels.shape[-1] * loss.item())
        SCREAMING_SNAKE_CASE__ = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 472
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _lowercase ( ):
    """Download and return the demo image (RGB PIL image) used to sanity-check
    the converted InstructBLIP model.

    The original body assigned both the URL and the image to the mangled name
    ``SCREAMING_SNAKE_CASE__`` and then read undefined names; the intended
    data flow is restored (``stream=True`` so ``.raw`` is usable).
    """
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    return image
def _lowercase ( UpperCamelCase_ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = val
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
SCREAMING_SNAKE_CASE__ = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
SCREAMING_SNAKE_CASE__ = torch.cat((q_bias, torch.zeros_like(UpperCamelCase_ , requires_grad=UpperCamelCase_ ), v_bias) )
SCREAMING_SNAKE_CASE__ = qkv_bias
def _lowercase ( model_name ):
    """Build the HF ``InstructBlipConfig`` (and image size) for a given LAVIS
    model name.

    The original assigned every intermediate to ``SCREAMING_SNAKE_CASE__`` while
    reading the real names (``model_name``, ``text_config``); restored so the
    config pieces actually flow into the final object.

    Returns:
        (config, image_size) tuple; ``image_size`` is 364 for COCO-finetuned
        checkpoints and 224 otherwise.

    Raises:
        ValueError: if ``model_name`` matches no known text backbone.
    """
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=32001 ).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=32001 ).to_dict()
    else:
        raise ValueError('Model name not supported' )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
    config = InstructBlipConfig(vision_config=vision_config , text_config=text_config , qformer_config=qformer_config )
    return config, image_size
@torch.no_grad()
def _lowercase ( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    """Convert a LAVIS InstructBLIP checkpoint to the HF format, verify logits
    against the original model, and optionally save/push the result.

    The original signature declared ``UpperCamelCase_`` three times (a duplicate
    parameter name, i.e. a SyntaxError); the parameter names are restored to
    match the positional CLI call site.

    NOTE(review): the body below still contains obfuscation residue — many
    assignments target ``SCREAMING_SNAKE_CASE__`` while later reads use the
    original local names, and some reads still reference ``UpperCamelCase_`` —
    so it is not runnable as-is; confirm against the upstream conversion script.
    """
    SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
    qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
    if "t5" in model_name:
        SCREAMING_SNAKE_CASE__ = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        SCREAMING_SNAKE_CASE__ = LlamaTokenizerFast.from_pretrained(
            'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
        tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_blipa_config(UpperCamelCase_ )
    SCREAMING_SNAKE_CASE__ = InstructBlipForConditionalGeneration(UpperCamelCase_ ).eval()
    SCREAMING_SNAKE_CASE__ = {
        'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
        'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
        'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
        'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
    }
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    SCREAMING_SNAKE_CASE__ = 'cuda:1' if torch.cuda.is_available() else 'cpu'
    SCREAMING_SNAKE_CASE__ = 'cuda:2' if torch.cuda.is_available() else 'cpu'
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = load_model_and_preprocess(
        name=UpperCamelCase_ , model_type=UpperCamelCase_ , is_eval=UpperCamelCase_ , device=UpperCamelCase_ )
    original_model.eval()
    print('Done!' )
    # update state dict keys
    SCREAMING_SNAKE_CASE__ = original_model.state_dict()
    SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase_ )
    for src, dest in rename_keys:
        rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        SCREAMING_SNAKE_CASE__ = state_dict.pop(UpperCamelCase_ )
        if key.startswith('Qformer.bert' ):
            SCREAMING_SNAKE_CASE__ = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            SCREAMING_SNAKE_CASE__ = key.replace('self' , 'attention' )
        if "llm_proj" in key:
            SCREAMING_SNAKE_CASE__ = key.replace('llm_proj' , 'language_projection' )
        if "t5_proj" in key:
            SCREAMING_SNAKE_CASE__ = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('llm_model' ):
            SCREAMING_SNAKE_CASE__ = key.replace('llm_model' , 'language_model' )
        if key.startswith('t5' ):
            SCREAMING_SNAKE_CASE__ = key.replace('t5' , 'language' )
        SCREAMING_SNAKE_CASE__ = val
    # read in qv biases
    read_in_q_v_bias(UpperCamelCase_ , UpperCamelCase_ )
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
    SCREAMING_SNAKE_CASE__ = load_demo_image()
    SCREAMING_SNAKE_CASE__ = 'What is unusual about this image?'
    # create processor
    SCREAMING_SNAKE_CASE__ = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ )
    SCREAMING_SNAKE_CASE__ = InstructBlipProcessor(
        image_processor=UpperCamelCase_ , tokenizer=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ , )
    SCREAMING_SNAKE_CASE__ = processor(images=UpperCamelCase_ , text=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ )
    # make sure processor creates exact same pixel values
    SCREAMING_SNAKE_CASE__ = vis_processors['eval'](UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ )
    SCREAMING_SNAKE_CASE__ = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCamelCase_ )
    original_model.to(UpperCamelCase_ )
    hf_model.to(UpperCamelCase_ )
    with torch.no_grad():
        if "vicuna" in model_name:
            SCREAMING_SNAKE_CASE__ = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
            SCREAMING_SNAKE_CASE__ = hf_model(**UpperCamelCase_ ).logits
        else:
            SCREAMING_SNAKE_CASE__ = original_model(
                {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
            SCREAMING_SNAKE_CASE__ = tokenizer('\n' , return_tensors='pt' ).input_ids.to(UpperCamelCase_ )
            SCREAMING_SNAKE_CASE__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
            SCREAMING_SNAKE_CASE__ = hf_model(**UpperCamelCase_ , labels=UpperCamelCase_ ).logits
    print('First values of original logits:' , original_logits[0, :3, :3] )
    print('First values of HF logits:' , logits[0, :3, :3] )
    # assert values
    assert original_logits.shape == logits.shape
    SCREAMING_SNAKE_CASE__ = 1e-4 if 'vicuna' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) , UpperCamelCase_ , atol=UpperCamelCase_ )
    print('Looks ok!' )
    print('Generating with original model...' )
    SCREAMING_SNAKE_CASE__ = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
    # important: we need to cast the weights of the HF model to the appropriate type
    print('Generating with HF model...' )
    SCREAMING_SNAKE_CASE__ = hf_model.generate(
        **UpperCamelCase_ , do_sample=UpperCamelCase_ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        SCREAMING_SNAKE_CASE__ = 2
    print('Original generation:' , UpperCamelCase_ )
    SCREAMING_SNAKE_CASE__ = processor.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
    SCREAMING_SNAKE_CASE__ = [text.strip() for text in output_text]
    print('HF generation:' , UpperCamelCase_ )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(UpperCamelCase_ )
        hf_model.save_pretrained(UpperCamelCase_ )
    if push_to_hub:
        processor.push_to_hub(F'Salesforce/{model_name}' )
        hf_model.push_to_hub(F'Salesforce/{model_name}' )
if __name__ == "__main__":
    # Build the CLI for the InstructBLIP checkpoint conversion script.
    # NOTE(review): the parser and the parsed args are bound to the mangled name
    # ``__snake_case`` but later used as ``parser`` / ``args`` — confirm
    # against the upstream script.
    __snake_case = argparse.ArgumentParser()
    # Known convertible checkpoints.
    __snake_case = [
        """instructblip-vicuna-7b""",
        """instructblip-vicuna-13b""",
        """instructblip-flan-t5-xl""",
        """instructblip-flan-t5-xxl""",
    ]
    parser.add_argument(
        """--model_name""",
        default="""instructblip-flan-t5-xl""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub after converting""",
    )
    __snake_case = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 472
| 1
|
import argparse
from collections import defaultdict
import yaml
__a: Optional[Any] = '''docs/source/en/_toctree.yml'''
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Union[str, Any]:
_UpperCAmelCase = defaultdict(__snake_case )
_UpperCAmelCase = []
_UpperCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__snake_case )
_UpperCAmelCase = new_doc_list
_UpperCAmelCase = [key for key, value in counts.items() if value > 1]
_UpperCAmelCase = []
for duplicate_key in duplicates:
_UpperCAmelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__snake_case ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
_UpperCAmelCase = sorted(__snake_case , key=lambda __snake_case : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__snake_case ) > 1:
raise ValueError("""{doc_list} has two \'overview\' docs which is not allowed.""" )
overview_doc.extend(__snake_case )
# Sort
return overview_doc
def _SCREAMING_SNAKE_CASE ( overwrite=False ):
    """Check that the "Schedulers" section of the doc toctree is clean/sorted.

    When *overwrite* is True the toctree file is rewritten in place; otherwise
    a mismatch raises ``ValueError``.

    Fixes vs. the original (mangled) version: it opened the boolean
    ``overwrite`` flag as a file path, dumped the flag instead of the updated
    content, and collapsed every local into ``_UpperCAmelCase``; the intended
    flow (read ``__a``, clean, compare, optionally rewrite) is restored —
    confirm against the upstream ``check_doc_toc`` utility.
    """
    with open(__a , encoding="""utf-8""" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["""sections"""]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["""sections"""]
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["""sections"""] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["""sections"""] = api_doc
            with open(__a , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def _SCREAMING_SNAKE_CASE ( overwrite=False ):
    """Check that the "Pipelines" section (including nested sub-sections) of the
    doc toctree is clean/sorted.

    When *overwrite* is True the toctree file is rewritten in place; otherwise
    a mismatch raises ``ValueError``.

    Fixes vs. the original (mangled) version: it opened the boolean
    ``overwrite`` flag as a file path, dumped the flag instead of the updated
    content, and collapsed every local into ``_UpperCAmelCase``; the intended
    flow is restored — confirm against the upstream ``check_doc_toc`` utility.
    """
    with open(__a , encoding="""utf-8""" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["""sections"""]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["""sections"""]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["""section"""]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc["""section"""] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["""sections"""] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["""sections"""] = api_doc
            with open(__a , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
    # CLI: ``--fix_and_overwrite`` rewrites the toctree in place instead of raising.
    # NOTE(review): the parser and args are bound to the mangled name ``__a``
    # but later used as ``parser`` / ``args`` — confirm against the upstream utility.
    __a: Optional[int] = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    __a: Any = parser.parse_args()
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 712
|
# Example adjacency lists (vertex -> list of successors) for the SCC routines below.
# First graph: one 3-cycle {0, 1, 2} (0->2->1->0) plus the chain 0->3->4.
__a: int = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
# Second graph: two 3-cycles, 0->1->2->0 and 3->4->5->3, linked by the edge 0->3.
# NOTE(review): both constants are bound to the same mangled name ``__a``,
# so the second assignment shadows the first — confirm the intended names.
__a: List[str] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> list[int]:
_UpperCAmelCase = True
_UpperCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__snake_case , __snake_case , __snake_case )
order.append(__snake_case )
return order
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> list[int]:
_UpperCAmelCase = True
_UpperCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__snake_case , __snake_case , __snake_case )
return component
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> list[list[int]]:
_UpperCAmelCase = len(__snake_case ) * [False]
_UpperCAmelCase = {vert: [] for vert in range(len(__snake_case ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__snake_case )
_UpperCAmelCase = []
for i, was_visited in enumerate(__snake_case ):
if not was_visited:
order += topology_sort(__snake_case , __snake_case , __snake_case )
_UpperCAmelCase = []
_UpperCAmelCase = len(__snake_case ) * [False]
for i in range(len(__snake_case ) ):
_UpperCAmelCase = order[len(__snake_case ) - i - 1]
if not visited[vert]:
_UpperCAmelCase = find_components(__snake_case , __snake_case , __snake_case )
components_list.append(__snake_case )
return components_list
| 402
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case__ = random.Random()
def lowerCamelCase_ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    Args:
        shape: pair `(num_rows, num_cols)` of the batch to generate.
        scale: upper bound (exclusive) of the uniform values.
        rng: object exposing `.random()`; defaults to the shared `random` module
            state so results are seedable from one place.
        name: unused, kept for signature compatibility with callers.

    Fix: the original signature reused one obfuscated parameter name four times
    (a SyntaxError) and bound both the rng fallback and the accumulator to a
    throwaway name, so `values` was undefined (NameError).
    """
    if rng is None:
        # The `random` module behaves as a shared Random instance.
        rng = random
    values = []
    for _batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class UpperCAmelCase ( unittest.TestCase ):
    """Config holder that fabricates speech inputs for feature-extractor tests.

    Fixes: the original `__init__` and the input-builder method repeated one
    obfuscated parameter name many times (a SyntaxError), the two helper
    methods shared the name `_lowerCAmelCase` (the first was unreachable), and
    the numpify branch called `np.asarray` on the wrong name.  Method names are
    restored to match the visible call sites (`prepare_feat_extract_dict`).
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6000 , return_attention_mask=True , do_normalize=True ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive example lengths so the batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict( self ):
        # Kwargs consumed by the feature-extractor constructor in the tests.
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        # Build a batch of float sequences; optionally all the same length
        # and/or converted to numpy arrays.
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs

    # Preserve the obfuscated name's last-definition-wins resolution for any
    # caller still using it.
    _lowerCAmelCase = prepare_inputs_for_common
class UpperCAmelCase ( __lowerCamelCase , unittest.TestCase ):
    """Unit tests for the Wav2Vec2 feature extractor (batching, padding,
    truncation, normalization and tensor conversion).

    NOTE(review): the obfuscation rebound every local to `lowercase` and the
    mixin base class to `__lowerCamelCase`; names read below (`feat_extract`,
    `speech_inputs`, `processed`, ...) are undefined as written.  Code is left
    byte-identical — only comments are added.
    """

    # Class under test, consumed by the (out-of-view) common test mixin.
    a__: List[str] = WavaVecaFeatureExtractor

    def _lowerCAmelCase ( self : Optional[int] ):
        # setUp: build the shared tester/config holder.
        lowercase : Optional[int] = WavaVecaFeatureExtractionTester(self )

    def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Dict ):
        # Assert (approximately) zero mean and unit variance per feature.
        self.assertTrue(np.all(np.mean(lowerCAmelCase , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )

    def _lowerCAmelCase ( self : Tuple ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        lowercase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        lowercase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase : List[str] = [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test not batched input
        lowercase : Optional[int] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        lowercase : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
        # Test batched
        lowercase : Dict = feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        lowercase : str = feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        lowercase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        lowercase : List[str] = np.asarray(lowerCAmelCase )
        lowercase : Optional[int] = feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        lowercase : Tuple = feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )

    def _lowerCAmelCase ( self : Dict ):
        # Normalization holds on the valid region for each padding strategy;
        # the padded tail stays (near) zero.
        lowercase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase : int = ['''longest''', '''max_length''', '''do_not_pad''']
        lowercase : Dict = [None, 1600, None]
        for max_length, padding in zip(lowerCAmelCase , lowerCAmelCase ):
            lowercase : List[Any] = feat_extract(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors='''np''' )
            lowercase : Union[str, Any] = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def _lowerCAmelCase ( self : Union[str, Any] ):
        # Same normalization check, lengths supplied via an explicit range.
        lowercase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase : Dict = range(800 , 1400 , 200 )
        lowercase : Optional[int] = [floats_list((1, x) )[0] for x in lengths]
        lowercase : Optional[int] = ['''longest''', '''max_length''', '''do_not_pad''']
        lowercase : int = [None, 1600, None]
        for max_length, padding in zip(lowerCAmelCase , lowerCAmelCase ):
            lowercase : List[Any] = feat_extract(lowerCAmelCase , max_length=lowerCAmelCase , padding=lowerCAmelCase )
            lowercase : Dict = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def _lowerCAmelCase ( self : Any ):
        # Truncation to max_length with padding='max_length'.
        lowercase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase : Optional[Any] = feat_extract(
            lowerCAmelCase , truncation=lowerCAmelCase , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
        lowercase : str = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def _lowerCAmelCase ( self : Optional[Any] ):
        # padding='longest' pads to the longest sample, capped by max_length.
        lowercase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase : Any = feat_extract(
            lowerCAmelCase , truncation=lowerCAmelCase , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
        lowercase : int = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        lowercase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase : int = feat_extract(
            lowerCAmelCase , truncation=lowerCAmelCase , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
        lowercase : Optional[Any] = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )

    @require_torch
    def _lowerCAmelCase ( self : Union[str, Any] ):
        # pad() keeps float32 dtype for both numpy and torch return tensors.
        import torch

        lowercase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase : Optional[int] = np.random.rand(100 ).astype(np.floataa )
        lowercase : Optional[int] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowercase : Union[str, Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            lowercase : Optional[int] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    @slow
    @require_torch
    def _lowerCAmelCase ( self : str ):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            lowercase : Any = WavaVecaConfig.from_pretrained(lowerCAmelCase )
            lowercase : Tuple = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase )
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 583
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
# Root of the transformers source tree; the repo-local package is imported
# from here so the script inspects the checked-out code, not an installed one.
# NOTE(review): the call sites below expect TRANSFORMERS_PATH / _re_tf_models /
# _re_flax_models / _re_pt_models, but every binding here targets the same
# obfuscated name `snake_case__`, so each assignment overwrites the previous.
snake_case__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
snake_case__ = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
snake_case__ = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
snake_case__ = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
snake_case__ = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
snake_case__ = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple ):
lowercase : Union[str, Any] = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCAmelCase_ )
return [m.group(0 ) for m in matches]
def lowerCamelCase_ ( ):
    """Build a DataFrame mapping each model type to its PT/TF/Flax support
    flags and preferred processor class.

    NOTE(review): the obfuscation rebound every assignment target to
    `lowercase` (and some arguments to `UpperCAmelCase_`), so the names read
    later (`config_maping_names`, `pt_models`, `attr_name`, `processors`, ...)
    are undefined as written; comments describe the intended data flow.
    """
    lowercase : str = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    # Map "Bert" -> "bert" style: config class prefix to model type.
    lowercase : str = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    lowercase : int = collections.defaultdict(UpperCAmelCase_ )
    lowercase : Optional[Any] = collections.defaultdict(UpperCAmelCase_ )
    lowercase : Tuple = collections.defaultdict(UpperCAmelCase_ )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(UpperCAmelCase_ ):
        lowercase : Union[str, Any] = None
        # Classify the symbol by framework prefix (TF / Flax / plain PT).
        if _re_tf_models.match(UpperCAmelCase_ ) is not None:
            lowercase : Optional[int] = tf_models
            lowercase : Optional[Any] = _re_tf_models.match(UpperCAmelCase_ ).groups()[0]
        elif _re_flax_models.match(UpperCAmelCase_ ) is not None:
            lowercase : Optional[int] = flax_models
            lowercase : Dict = _re_flax_models.match(UpperCAmelCase_ ).groups()[0]
        elif _re_pt_models.match(UpperCAmelCase_ ) is not None:
            lowercase : List[str] = pt_models
            lowercase : Union[str, Any] = _re_pt_models.match(UpperCAmelCase_ ).groups()[0]
        if lookup_dict is not None:
            # Strip trailing camel-case words until the prefix matches a known
            # model type, then flag that type as supported for this backend.
            while len(UpperCAmelCase_ ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lowercase : Any = True
                    break
                # Try again after removing the last word in the name
                lowercase : List[Any] = ''''''.join(camel_case_split(UpperCAmelCase_ )[:-1] )
    # Union of all model types seen in any framework, sorted for stable output.
    lowercase : List[Any] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    lowercase : Tuple = list(UpperCAmelCase_ )
    all_models.sort()
    lowercase : str = {'''model_type''': all_models}
    lowercase : List[Any] = [pt_models[t] for t in all_models]
    lowercase : str = [tf_models[t] for t in all_models]
    lowercase : Optional[int] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    lowercase : Optional[int] = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            lowercase : List[Any] = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            lowercase : Optional[Any] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            lowercase : str = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            lowercase : Any = '''AutoTokenizer'''
    lowercase : Tuple = [processors[t] for t in all_models]
    return pd.DataFrame(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ):
    """Refresh the (model_class -> (pipeline_tag, auto_class)) table from the
    PT/TF/Flax auto-modules and return it.

    NOTE(review): locals are rebound to `lowercase` and most call arguments to
    `UpperCAmelCase_` by the obfuscation; names read below (`model_names`,
    `table`, `module`, `mapping`, ...) are undefined as written.
    """
    lowercase : str = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        # Per-framework mapping and auto-class name variants.
        lowercase : Tuple = [model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}''']
        lowercase : List[Any] = [auto_class, f'''TF_{auto_class}''', f'''Flax_{auto_class}''']
        # Loop through all three frameworks
        for module, cls, mapping in zip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
            # The type of pipeline may not exist in this framework
            if not hasattr(UpperCAmelCase_ , UpperCAmelCase_ ):
                continue
            # First extract all model_names
            lowercase : Tuple = []
            for name in getattr(UpperCAmelCase_ , UpperCAmelCase_ ).values():
                if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                    model_names.append(UpperCAmelCase_ )
                else:
                    model_names.extend(list(UpperCAmelCase_ ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ):
    """Regenerate frameworks.json / pipeline_tags.json and push them to the
    huggingface/transformers-metadata dataset repo.

    The two parameters are the Hub auth token and the commit sha to reference
    in the commit message. NOTE(review): locals are rebound to `lowercase` by
    the obfuscation, so names read below (`table`, `tags_dataset`,
    `frameworks_dataset`, `commit_sha`, ...) are undefined as written.
    """
    lowercase : Union[str, Any] = get_frameworks_table()
    lowercase : Optional[int] = Dataset.from_pandas(UpperCAmelCase_ )
    # Start from the currently-published pipeline tags and update them.
    lowercase : Optional[Any] = hf_hub_download(
        '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCAmelCase_ )
    lowercase : Optional[Any] = Dataset.from_json(UpperCAmelCase_ )
    lowercase : List[str] = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(UpperCAmelCase_ ) )
    }
    lowercase : Optional[Any] = update_pipeline_and_auto_class_table(UpperCAmelCase_ )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    lowercase : List[str] = sorted(table.keys() )
    lowercase : Tuple = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        } )
    lowercase : Union[str, Any] = Dataset.from_pandas(UpperCAmelCase_ )
    # Serialize both datasets to a temp dir and upload the folder in one commit.
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(UpperCAmelCase_ , '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(UpperCAmelCase_ , '''pipeline_tags.json''' ) )
        if commit_sha is not None:
            lowercase : Any = (
                f'''Update with commit {commit_sha}\n\nSee: '''
                f'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
            )
        else:
            lowercase : Dict = '''Update'''
        upload_folder(
            repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCAmelCase_ , repo_type='''dataset''' , token=UpperCAmelCase_ , commit_message=UpperCAmelCase_ , )
def lowerCamelCase_ ( ):
    """Verify every pipeline task supported by transformers has an entry in
    PIPELINE_TAGS_AND_AUTO_MODELS; raise ValueError listing the missing ones.

    NOTE(review): locals are rebound to `lowercase` by the obfuscation, so
    names read below (`in_table`, `pipeline_tasks`, `model`, `missing`, `msg`)
    are undefined as written.
    """
    lowercase : Union[str, Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    lowercase : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
    lowercase : List[Any] = []
    for key in pipeline_tasks:
        if key not in in_table:
            # The task's default PT model may be a (list/tuple); take the first.
            lowercase : Optional[Any] = pipeline_tasks[key]['''pt''']
            if isinstance(UpperCAmelCase_ , (list, tuple) ):
                lowercase : Optional[int] = model[0]
            lowercase : str = model.__name__
            if model not in in_table.values():
                missing.append(UpperCAmelCase_ )
    if len(UpperCAmelCase_ ) > 0:
        lowercase : Any = ''', '''.join(UpperCAmelCase_ )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            f'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
    # CLI: --token/--commit_sha drive the Hub metadata update; --check-only
    # just validates that every pipeline task is covered by the constant table.
    # Fix: the parser and parsed args were bound to the throwaway name
    # `snake_case__` while the following lines read `parser` and `args`,
    # which were never defined (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
    parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
    parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
    args = parser.parse_args()
    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 583
| 1
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def A_( input_a , input_b ):
    """Return the Euclidean distance between two equal-length numeric sequences.

    Fix: the original signature used one name for both parameters (a
    SyntaxError, and it collapsed the two operands into one); distinct names
    make the pairwise zip meaningful.
    """
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def A_( dataset , value_array ):
    """For each vector in `value_array`, find the nearest vector in `dataset`.

    Args:
        dataset: 2-D numpy array of candidate vectors.
        value_array: 2-D numpy array of query vectors (same dtype/width).

    Returns:
        A list of `[nearest_vector_as_list, distance]` pairs, one per query.

    Raises:
        ValueError: on dimension or row-width mismatch.
        TypeError: on shape (via IndexError path) or dtype mismatch.

    Fixes: the original signature reused one parameter name (a SyntaxError),
    bound every local to a throwaway name, and called a helper under a name
    that no longer exists in this file; a private local helper replaces it.
    """
    def _euclidean(input_a, input_b):
        # Plain Euclidean distance, local so the block is self-contained.
        return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))

    if dataset.ndim != value_array.ndim:
        msg = (
            """Wrong input data's dimensions... """
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                """Wrong input data's shape... """
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no shape[1]; only acceptable when both match.
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""" )
    if dataset.dtype != value_array.dtype:
        msg = (
            """Input data have different datatype... """
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        # Seed with the first candidate, then keep the running minimum.
        dist = _euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = _euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def A_( input_a , input_b ):
    """Return the cosine similarity of two vectors: dot(a, b) / (|a| * |b|).

    Fix: the original signature repeated one parameter name (a SyntaxError
    that collapsed the two operands into one vector).
    """
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 700
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure for the SEW model: the configuration is always
# importable, the modeling symbols only when torch is installed.
# Fix: the structure dict and the torch-only symbol list were bound to
# throwaway names while `_import_structure` (read below) was never defined.
_import_structure = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply omit the modeling entries.
    pass
else:
    _import_structure["""modeling_sew"""] = [
        """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SEWForCTC""",
        """SEWForSequenceClassification""",
        """SEWModel""",
        """SEWPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports;
    # the original assigned the _LazyModule to an unused variable, which never
    # registered it.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 486
| 0
|
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' ,[None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' ,['''default''', 0, 1_00 * 2**20, 9_00 * 2**20] )
def UpperCAmelCase__ (monkeypatch ,dataset_size ,input_in_memory_max_size ):
    """Check `is_small_dataset` against the configurable in-memory size cap."""
    # Fix: the obfuscated signature reused one parameter name three times
    # (a SyntaxError) — the names must match the `parametrize` ids above plus
    # pytest's built-in `monkeypatch` fixture, and the body reads them.
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config ,'''IN_MEMORY_MAX_SIZE''' ,input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        # The library default for IN_MEMORY_MAX_SIZE is 0 (disabled).
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset is "small" only when both sizes are truthy and it fits the cap.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 550
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
snake_case = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
snake_case = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] ,num_layers=predefined_args['''num_layers'''] ,units=predefined_args['''units'''] ,hidden_size=predefined_args['''hidden_size'''] ,max_length=predefined_args['''max_length'''] ,num_heads=predefined_args['''num_heads'''] ,scaled=predefined_args['''scaled'''] ,dropout=predefined_args['''dropout'''] ,output_attention=UpperCamelCase_ ,output_all_encodings=UpperCamelCase_ ,use_residual=predefined_args['''use_residual'''] ,activation=predefined_args.get('''activation''' ,'''gelu''' ) ,layer_norm_eps=predefined_args.get('''layer_norm_eps''' ,UpperCamelCase_ ) ,)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
snake_case = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
snake_case = os.path.join(get_home_dir() ,'''models''' )
snake_case = _load_vocab(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,cls=UpperCamelCase_ )
snake_case = nlp.model.BERTModel(
UpperCamelCase_ ,len(UpperCamelCase_ ) ,units=predefined_args['''units'''] ,embed_size=predefined_args['''embed_size'''] ,embed_dropout=predefined_args['''embed_dropout'''] ,word_embed=predefined_args['''word_embed'''] ,use_pooler=UpperCamelCase_ ,use_token_type_embed=UpperCamelCase_ ,token_type_vocab_size=predefined_args['''token_type_vocab_size'''] ,use_classifier=UpperCamelCase_ ,use_decoder=UpperCamelCase_ ,)
original_bort.load_parameters(UpperCamelCase_ ,cast_dtype=UpperCamelCase_ ,ignore_extra=UpperCamelCase_ )
snake_case = original_bort._collect_params_with_prefix()
# Build our config 🤗
snake_case = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(UpperCamelCase_ ),
}
snake_case = BertConfig.from_dict(UpperCamelCase_ )
snake_case = BertForMaskedLM(UpperCamelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(UpperCamelCase_ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(UpperCamelCase_ ,UpperCamelCase_ ):
snake_case = hf_param.shape
snake_case = to_torch(params[gluon_param] )
snake_case = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
snake_case = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight ,'''word_embed.0.weight''' )
snake_case = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight ,'''encoder.position_weight''' )
snake_case = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias ,'''encoder.layer_norm.beta''' )
snake_case = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight ,'''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
snake_case = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
snake_case = hf_bort_model.bert.encoder.layer[i]
# self attention
snake_case = layer.attention.self
snake_case = check_and_map_params(
self_attn.key.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
snake_case = check_and_map_params(
self_attn.key.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
snake_case = check_and_map_params(
self_attn.query.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
snake_case = check_and_map_params(
self_attn.query.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
snake_case = check_and_map_params(
self_attn.value.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
snake_case = check_and_map_params(
self_attn.value.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
snake_case = layer.attention.output
snake_case = check_and_map_params(
self_output.dense.bias ,F'''encoder.transformer_cells.{i}.proj.bias''' )
snake_case = check_and_map_params(
self_output.dense.weight ,F'''encoder.transformer_cells.{i}.proj.weight''' )
snake_case = check_and_map_params(
self_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
snake_case = check_and_map_params(
self_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
snake_case = layer.intermediate
snake_case = check_and_map_params(
intermediate.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
snake_case = check_and_map_params(
intermediate.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
snake_case = layer.output
snake_case = check_and_map_params(
bert_output.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
snake_case = check_and_map_params(
bert_output.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
snake_case = check_and_map_params(
bert_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
snake_case = check_and_map_params(
bert_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
snake_case = RobertaTokenizer.from_pretrained('''roberta-base''' )
snake_case = tokenizer.encode_plus(UpperCamelCase_ )['''input_ids''']
# Get gluon output
snake_case = mx.nd.array([input_ids] )
snake_case = original_bort(inputs=UpperCamelCase_ ,token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(UpperCamelCase_ )
snake_case = BertModel.from_pretrained(UpperCamelCase_ )
hf_bort_model.eval()
snake_case = tokenizer.encode_plus(UpperCamelCase_ ,return_tensors='''pt''' )
snake_case = hf_bort_model(**UpperCamelCase_ )[0]
snake_case = output_gluon[0].asnumpy()
snake_case = output_hf[0].detach().numpy()
snake_case = np.max(np.abs(hf_layer - gluon_layer ) ).item()
snake_case = np.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1e-3 )
if success:
print('''✔️ Both model do output the same tensors''' )
else:
print('''❌ Both model do **NOT** output the same tensors''' )
print('''Absolute difference is:''' ,UpperCamelCase_ )
# Script entry point: parse CLI arguments and run the Bort -> PyTorch conversion.
# NOTE(review): the parser is assigned to `_SCREAMING_SNAKE_CASE` but the lines
# below call `parser.add_argument(...)` and read `args.*` — these names do not
# match (apparent auto-renaming artifact) and would raise NameError; confirm
# the intended names (`parser` / `args`).
if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    _SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 550
| 1
|
def SCREAMING_SNAKE_CASE_(UpperCAmelCase_: str) -> int:
    """Convert an Excel-style column title (e.g. ``"AB"``) to its 1-based number.

    The title is read as a base-26 numeral with digits ``A`` = 1 ... ``Z`` = 26,
    so ``"A"`` -> 1, ``"AB"`` -> 28, ``"ZZ"`` -> 702.

    :param UpperCAmelCase_: column title consisting of uppercase A-Z only
    :return: the corresponding column number
    """
    assert UpperCAmelCase_.isupper()
    answer = 0
    index = len(UpperCAmelCase_) - 1
    power = 0
    while index >= 0:
        # BUG FIX: the exponent must be the running `power`, not the title
        # string itself (the original passed the str argument to pow(),
        # which raises TypeError on the first iteration).
        value = (ord(UpperCAmelCase_[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 719
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively insertion-sort the first ``n`` elements of ``collection`` in place.

    FIX: the auto-renamed original declared both parameters with the same
    placeholder name (duplicate parameter names are a SyntaxError) and its
    recursive calls referenced ``insert_next`` / ``rec_insertion_sort``, which
    did not exist under the mangled names.  Restored the identifiers the call
    sites (including the ``__main__`` block) actually use.

    :param collection: mutable sequence to sort in place
    :param n: number of leading elements to sort
    """
    # Base case: an empty or single-element prefix is already sorted.
    if len(collection) <= 1 or n <= 1:
        return
    # Push the element at n-1 toward its place, then sort the shorter prefix.
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int) -> None:
    """Swap ``collection[index]`` rightward until the pair at ``index`` is ordered.

    FIX: restored distinct, meaningful parameter names — the auto-renamed
    original used one placeholder for both parameters (a SyntaxError) and its
    body referenced ``collection`` / ``index``, which no longer existed.

    :param collection: mutable sequence being sorted
    :param index: position whose left neighbour is compared against it
    """
    # Checks order between adjacent elements; stop at the end or when ordered.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order.
    collection[index], collection[index - 1] = (
        collection[index - 1],
        collection[index],
    )
    insert_next(collection, index + 1)
# Script entry point: read whitespace-separated integers from stdin, sort
# them in place with the recursive insertion sort above, and print them.
# NOTE(review): the two assignments both target `_lowercase` while the later
# lines read `numbers` / `number_list` — an auto-renaming artifact that would
# raise NameError; confirm the intended variable names.
if __name__ == "__main__":
    _lowercase = input("""Enter integers separated by spaces: """)
    _lowercase = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 431
| 0
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """Minimal FIFO queue used for the level-order traversal in ``AVLtree.__str__``.

    FIX: restored the real class and method names — the auto-renamed version
    gave every method the same placeholder name (so only the last definition
    survived) while the traversal code calls ``MyQueue`` / ``push`` / ``pop`` /
    ``is_empty``.
    """

    def __init__(self) -> None:
        self.data: list[Any] = []  # backing store; popped items stay in the list
        self.head: int = 0  # index of the current front element
        self.tail: int = 0  # index one past the last element

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        # Number of elements currently queued.
        return self.tail - self.head

    def print_queue(self) -> None:
        # Debug helper: full backing list, a separator, then the live window.
        print(self.data)
        print('**************')
        print(self.data[self.head : self.tail])
class MyNode:
    """A single AVL-tree node: payload plus left/right children and height.

    FIX: restored real names — the auto-renamed original reused one method
    name for all accessors and its setter bodies referenced undefined
    ``node`` / ``data`` / ``height`` locals instead of assigning attributes.
    """

    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1  # a leaf has height 1; 0 means "no node"

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    """Return the height of ``node``; a missing child (``None``) counts as 0.

    FIX: restored the name ``get_height`` used throughout the rotation and
    rebalancing helpers below.
    """
    if node is None:
        return 0
    return node.get_height()
def my_max(a: int, b: int) -> int:
    """Return the larger of two ints (local helper mirroring builtin ``max``).

    FIX: the auto-renamed original declared both parameters with the same
    placeholder name — a SyntaxError; restored distinct names.
    """
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Rotate ``node`` right: its left child becomes the new subtree root.

    Used for the left-left imbalance case; heights of the two moved nodes are
    recomputed.  (The printed label is kept as in the original code.)

    FIX: restored the function/local names — callers (``insert_node``,
    ``del_node``, ``lr_rotation``) invoke ``right_rotation``, and the
    original collapsed ``node``/``h1``/``h2`` into one placeholder.
    """
    print('left rotation node:', node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    """Rotate ``node`` left: its right child becomes the new subtree root.

    Used for the right-right imbalance case; heights of the two moved nodes
    are recomputed.  (The printed label is kept as in the original code.)

    FIX: restored the function/local names used by the callers.
    """
    print('right rotation node:', node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation: rotate the left child left, then ``node`` right.

    FIX: restored the name ``lr_rotation`` used by ``insert_node``/``del_node``.
    """
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation: rotate the right child right, then ``node`` left.

    FIX: restored the name ``rl_rotation`` used by ``insert_node``/``del_node``.
    """
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    """Insert ``data`` into the AVL subtree rooted at ``node``; return the new root.

    Rebalances with single/double rotations whenever the height difference
    between the two children reaches 2.

    FIX: restored distinct parameter names (the original duplicated one
    placeholder — a SyntaxError) and the local/rotation names the logic needs.
    """
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    """Return the largest value in the subtree (follow right children to the end).

    FIX: restored the descriptive name and local variable lost to auto-renaming.
    """
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root: MyNode) -> Any:
    """Return the smallest value in the subtree (follow left children to the end).

    FIX: restored the name ``get_left_most`` that ``del_node`` calls.
    """
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    """Delete ``data`` from the AVL subtree rooted at ``root``; return the new root.

    Prints 'No such data' when the value is absent on a left search path, and
    rebalances on the way out whenever the child heights differ by 2.

    FIX: restored distinct parameter names (duplicate placeholders were a
    SyntaxError) and the ``left_child``/``right_child``/``temp_data`` locals
    that the collapsed placeholders had destroyed.
    """
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Two children: replace with the in-order successor, then delete it.
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data')
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # Rebalance if the deletion unbalanced this node.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    """Self-balancing binary search tree facade over the helpers above.

    FIX: restored the class name the demo code instantiates (``AVLtree``),
    the method names it calls (``insert`` / ``del_node``), and the root
    re-assignments the auto-renamed version had dropped.
    """

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print('insert:' + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print('delete:' + str(data))
        if self.root is None:
            print('Tree is empty!')
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversale, gives a more intuitive look on the tree
        output = ''
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                # Keep the grid aligned by enqueueing placeholder children.
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    """Run the module doctests.

    FIX: named ``_test`` because the ``__main__`` guard below calls ``_test()``;
    the auto-renamed definition made that call a NameError.
    """
    import doctest

    doctest.testmod()
if __name__ == "__main__":
    _test()
# NOTE(review): the demo below sits OUTSIDE the __main__ guard, so it runs at
# import time; it also assigns to `__UpperCamelCase` while later lines read
# `t` / `lst` — an auto-renaming artifact that would raise NameError.  Confirm
# the intended names and whether the demo should be inside the guard.
__UpperCamelCase: int = AVLtree()
__UpperCamelCase: int = list(range(1_0))
random.shuffle(lst)
# Insert the shuffled values, printing the tree after each round.
for i in lst:
    t.insert(i)
print(str(t))
random.shuffle(lst)
# Delete everything again in a new random order.
for i in lst:
    t.del_node(i)
print(str(t))
| 266
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Nearest-neighbour image resizer.

    FIX: restored the names the ``__main__`` block relies on
    (``NearestNeighbour``, attribute ``output``, methods ``process`` /
    ``get_x`` / ``get_y``), the dropped output-pixel assignment in
    ``process``, and the mangled ``np.uinta`` -> ``np.uint8`` dtype.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        """Store the source image and precompute the sampling ratios.

        :param img: source image as an H x W x 3 ndarray
        :param dst_width: target width in pixels (must be >= 0)
        :param dst_height: target height in pixels (must be >= 0)
        :raises ValueError: if a negative destination size is given
        """
        if dst_width < 0 or dst_height < 0:
            raise ValueError('Destination width/height should be > 0')
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        # Output buffer, initialized to white.
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill ``self.output`` by sampling the nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column to its nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row to its nearest source row."""
        return int(self.ratio_y * y)
# Demo: load an image from disk, resize it to 800x600 with nearest-neighbour
# sampling, and display the result (requires a GUI-capable OpenCV build).
# NOTE(review): the tuple/image/resizer results are all assigned to
# `__UpperCamelCase` while later lines read `dst_w`, `dst_h`, `im` and `n` —
# an auto-renaming artifact that would raise NameError; confirm the intended
# names before running.
if __name__ == "__main__":
    __UpperCamelCase, __UpperCamelCase: List[Any] = 8_0_0, 6_0_0
    __UpperCamelCase: Optional[Any] = imread("""image_data/lena.jpg""", 1)
    __UpperCamelCase: Union[str, Any] = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
    )
    waitKey(0)
    destroyAllWindows()
| 266
| 1
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase ( _lowercase ):
    """Reader that materializes JSON / JSON-Lines files as a ``Dataset``.

    NOTE(review): identifiers were flattened by an automatic renamer — every
    ``__init__`` parameter is called ``A__`` (duplicate parameter names are a
    SyntaxError) and the body still reads the original names
    (``path_or_paths``, ``field``).  Restore the upstream signature
    (path_or_paths, split, features, cache_dir, keep_in_memory, streaming,
    field, num_proc, **kwargs) before use.
    """

    def __init__(self : Optional[Any] , A__ : NestedDataStructureLike[PathLike] , A__ : Optional[NamedSplit] = None , A__ : Optional[Features] = None , A__ : str = None , A__ : bool = False , A__ : bool = False , A__ : Optional[str] = None , A__ : Optional[int] = None , **A__ : List[str] , ) -> Union[str, Any]:
        # Delegate the common reader options to the AbstractDatasetReader base.
        super().__init__(
            A__ , split=A__ , features=A__ , cache_dir=A__ , keep_in_memory=A__ , streaming=A__ , num_proc=A__ , **A__ , )
        lowercase = field
        # Normalize a single path into the {split: paths} mapping the builder expects.
        lowercase = path_or_paths if isinstance(A__ , A__ ) else {self.split: path_or_paths}
        # Underlying Arrow builder that performs the actual parsing/caching.
        lowercase = Json(
            cache_dir=A__ , data_files=A__ , features=A__ , field=A__ , **A__ , )

    def UpperCAmelCase__ (self : Dict ) -> str:
        """Build and return the dataset (streaming or fully prepared)."""
        # Build iterable dataset
        if self.streaming:
            lowercase = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            lowercase = None
            lowercase = None
            lowercase = None
            lowercase = None
            # Download/prepare to the cache, then load the requested split.
            self.builder.download_and_prepare(
                download_config=A__ , download_mode=A__ , verification_mode=A__ , base_path=A__ , num_proc=self.num_proc , )
            lowercase = self.builder.as_dataset(
                split=self.split , verification_mode=A__ , in_memory=self.keep_in_memory )
        return dataset
class UpperCAmelCase :
    """Writer that serializes a ``Dataset`` to JSON-Lines, optionally
    compressed and optionally in parallel with a process pool.

    NOTE(review): same auto-renaming damage as the reader above — duplicate
    ``A__`` parameters (a SyntaxError) while the bodies read the original
    names (``dataset``, ``path_or_buf``, ``batch_size``, ``num_proc``,
    ``to_json_kwargs``, ``args`` ...).  Restore the upstream signatures
    before use.
    """

    def __init__(self : Union[str, Any] , A__ : Dataset , A__ : Union[PathLike, BinaryIO] , A__ : Optional[int] = None , A__ : Optional[int] = None , **A__ : Optional[int] , ) -> str:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.' )
        lowercase = dataset
        lowercase = path_or_buf
        # Fall back to the library-wide default batch size.
        lowercase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        lowercase = num_proc
        lowercase = "utf-8"
        lowercase = to_json_kwargs

    def UpperCAmelCase__ (self : Any ) -> int:
        """Write the dataset and return the number of bytes written."""
        # Pop writer-level options out of the pandas ``to_json`` kwargs.
        lowercase = self.to_json_kwargs.pop("path_or_buf" , A__ )
        lowercase = self.to_json_kwargs.pop("orient" , "records" )
        # JSON-Lines by default when orient == "records".
        lowercase = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
        lowercase = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
        lowercase = self.to_json_kwargs.pop("compression" , A__ )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'`datasets` currently does not support {compression} compression' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            # Path-like destination: let fsspec handle (optionally compressed) opening.
            with fsspec.open(self.path_or_buf , "wb" , compression=A__ ) as buffer:
                lowercase = self._write(file_obj=A__ , orient=A__ , lines=A__ , index=A__ , **self.to_json_kwargs )
        else:
            # Caller passed an already-open buffer: compression unsupported there.
            if compression:
                raise NotImplementedError(
                    f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    " was passed. Please provide a local path instead." )
            lowercase = self._write(
                file_obj=self.path_or_buf , orient=A__ , lines=A__ , index=A__ , **self.to_json_kwargs )
        return written

    def UpperCAmelCase__ (self : List[str] , A__ : Dict ) -> Optional[Any]:
        """Serialize one batch of rows to encoded JSON bytes."""
        lowercase , lowercase , lowercase , lowercase , lowercase = args
        # Slice the underlying Arrow table for this batch.
        lowercase = query_table(
            table=self.dataset.data , key=slice(A__ , offset + self.batch_size ) , indices=self.dataset._indices , )
        lowercase = batch.to_pandas().to_json(
            path_or_buf=A__ , orient=A__ , lines=A__ , index=A__ , **A__ )
        # Ensure every batch ends with a newline so lines never run together.
        if not json_str.endswith("\n" ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def UpperCAmelCase__ (self : str , A__ : BinaryIO , A__ : Optional[int] , A__ : List[str] , A__ : Dict , **A__ : Any , ) -> int:
        """Stream serialized batches to ``file_obj``, sequentially or pooled."""
        lowercase = 0
        if self.num_proc is None or self.num_proc == 1:
            # Sequential path: one batch at a time with a progress bar.
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                lowercase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(A__ )
        else:
            # Parallel path: pool.imap keeps batches in order while overlapping work.
            lowercase , lowercase = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , A__ , A__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                    written += file_obj.write(A__ )
        return written
| 459
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_lowercase )
class UpperCAmelCase ( _lowercase ):
    """Task template describing an image-classification dataset layout.

    NOTE(review): all four class fields are named ``UpperCAmelCase`` (an
    auto-renaming artifact), so only the last assignment survives at class
    creation; upstream these are ``task``, ``input_schema``,
    ``label_schema``, ``image_column`` and ``label_column``.
    """
    # Task identifier; kept in asdict() output even at its default value.
    UpperCAmelCase : str = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    UpperCAmelCase : ClassVar[Features] = Features({'''image''': Image()} )
    UpperCAmelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
    UpperCAmelCase : str = "image"
    UpperCAmelCase : str = "labels"

    def UpperCAmelCase__ (self : List[str] , A__ : Union[str, Any] ) -> Union[str, Any]:
        """Return a copy of this template whose label schema is aligned with
        the dataset's actual ClassLabel feature."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , A__ ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        # Work on a deep copy so the (frozen) template itself is never mutated.
        lowercase = copy.deepcopy(self )
        lowercase = self.label_schema.copy()
        lowercase = features[self.label_column]
        lowercase = label_schema
        return task_template

    @property
    def UpperCAmelCase__ (self : List[Any] ) -> Dict[str, str]:
        # Mapping from dataset column names to the task's canonical names.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 459
| 1
|
# Universal gas constant R (J mol^-1 K^-1).
# FIX: restored the name UNIVERSAL_GAS_CONSTANT — the auto-renamed
# `__UpperCamelCase` left the functions below referencing an undefined name.
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal-gas law: return the pressure P = nRT / V.

    FIX: restored distinct parameter names (the original duplicated one
    placeholder for all three — a SyntaxError).

    :param moles: amount of substance n (mol), must be non-negative
    :param kelvin: absolute temperature T (K), must be non-negative
    :param volume: volume V (m^3), must be non-negative
    :raises ValueError: if any input is negative
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal-gas law: return the volume V = nRT / P (uses the module-level
    ``UNIVERSAL_GAS_CONSTANT``).

    FIX: restored distinct parameter names and a non-clashing function name —
    the auto-renamed original reused ``snake_case`` for both functions (the
    second shadowed the first) and duplicated one placeholder for all three
    parameters (a SyntaxError).

    :param moles: amount of substance n (mol), must be non-negative
    :param kelvin: absolute temperature T (K), must be non-negative
    :param pressure: pressure P (Pa), must be non-negative
    :raises ValueError: if any input is negative
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 80
|
def encrypt(input_string: str, key: int) -> str:
    """Rail-fence (zigzag) encrypt ``input_string`` using ``key`` rails.

    FIX: restored real identifiers — the auto-renamed original declared both
    parameters with the same placeholder name (a SyntaxError) and indexed
    with the wrong variable in several places.

    :param input_string: plaintext to encrypt
    :param key: number of rails (grid height); must be positive
    :raises ValueError: if ``key`` <= 0
    :return: the ciphertext (rails concatenated top to bottom)
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    # A single rail, or a message shorter than the rail count, is unchanged.
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["""""".join(row) for row in temp_grid]
    output_string = """""".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Decrypt a rail-fence ciphertext produced with ``key`` rails.

    Rebuilds the zigzag template, slices the ciphertext into rails of the
    right lengths, then reads the characters back in zigzag order.

    FIX: restored real identifiers — the auto-renamed original declared both
    parameters with one placeholder (a SyntaxError) and sliced with the
    wrong variable.

    :param input_string: ciphertext to decrypt
    :param key: number of rails used to encrypt; must be positive
    :raises ValueError: if ``key`` <= 0
    :return: the recovered plaintext
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("""*""")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = """"""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Return every candidate decryption of ``input_string``, keyed by the
    rail count tried (1 .. len-1).

    FIX: restored the real function/variable names and passed the actual
    ``key_guess`` to ``decrypt`` — the auto-renamed original reused one
    placeholder for both arguments.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 80
| 1
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowercase = logging.getLogger(__name__)
@dataclass
class __lowercase ( __UpperCAmelCase ):
    """Seq2Seq-specific additions to ``TrainingArguments``.

    NOTE(review): every field below is named ``_A`` (auto-renaming artifact),
    so at class-creation time later assignments overwrite earlier ones;
    upstream these are label_smoothing, sortish_sampler,
    predict_with_generate, adafactor, encoder_layerdrop, decoder_layerdrop,
    dropout, attention_dropout and lr_scheduler.
    """
    _A : Optional[float] = field(
        default=0.0, metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    _A : bool = field(default=__UpperCAmelCase, metadata={'''help''': '''Whether to SortishSamler or not.'''} )
    _A : bool = field(
        default=__UpperCAmelCase, metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    _A : bool = field(default=__UpperCAmelCase, metadata={'''help''': '''whether to use adafactor'''} )
    _A : Optional[float] = field(
        default=__UpperCAmelCase, metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
    _A : Optional[float] = field(
        default=__UpperCAmelCase, metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
    _A : Optional[float] = field(default=__UpperCAmelCase, metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
    _A : Optional[float] = field(
        default=__UpperCAmelCase, metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
    # The f-string embeds the scheduler names known to seq2seq_trainer.
    _A : Optional[str] = field(
        default='''linear''', metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''}, )
| 701
|
def lowerCamelCase_(UpperCamelCase__: int = 100):
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first ``UpperCamelCase__`` natural numbers.
    """
    square_of_sum = sum(range(1, UpperCamelCase__ + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, UpperCamelCase__ + 1))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    # NOTE(review): `solution` does not exist under that name here — the
    # definition above was auto-renamed to `lowerCamelCase_`, so this print
    # would raise NameError; confirm the intended function name.
    print(f'{solution() = }')
| 591
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Menu labels for the available torch dynamo backends; the index the user
# selects is mapped onto this list by the dynamo-backend conversion helper.
# NOTE(review): that helper references `DYNAMO_BACKENDS`, but this list is
# bound to `SCREAMING_SNAKE_CASE_` — an auto-renaming artifact; confirm the
# intended constant name.
SCREAMING_SNAKE_CASE_ = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt on stdin until the answer converts cleanly.

    Returns ``default`` for an empty answer (when a default is given);
    applies ``convert_value`` to the raw string when provided, re-prompting
    (and printing ``error_message``) whenever conversion raises.

    FIX: restored distinct parameter names — the auto-renamed original used
    the same placeholder for all four (a SyntaxError).
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    """Render a BulletMenu of ``options`` and return the (converted) choice.

    FIX: restored distinct parameter names — the auto-renamed original used
    one placeholder for all of them (a SyntaxError).  The mutable default
    for ``options`` is kept as-is for interface compatibility; the list is
    never mutated here.
    """
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    """Map a menu index (as str/int) to a ComputeEnvironment enum member.

    FIX: restored the parameter name the body reads (``value``).
    """
    value = int(value)
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value])
def _convert_distributed_mode(value):
    """Map a menu index (as str/int) to a DistributedType enum member.

    FIX: restored the parameter name the body reads (``value``).
    """
    value = int(value)
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value])
def _convert_dynamo_backend(value):
    """Map a menu index into the DYNAMO_BACKENDS list and return the enum value.

    FIX: restored the parameter name the body reads (``value``).
    """
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    """Map a menu index (as str/int) to a PrecisionType enum member.

    FIX: restored the parameter name the body reads (``value``).
    """
    value = int(value)
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value])
def _convert_sagemaker_distributed_mode(value):
    """Map a menu index (as str/int) to a SageMakerDistributedType member.

    FIX: restored the parameter name the body reads (``value``).
    """
    value = int(value)
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value])
def _convert_yes_no_to_bool(value):
    """Convert a 'yes'/'no' answer (any case) to True/False.

    Raises KeyError for any other input, which the prompting loop treats as
    "ask again".

    FIX: restored the parameter name the body reads (``value``).
    """
    return {"yes": True, "no": False}[value.lower()]
class snake_case_(argparse.RawDescriptionHelpFormatter):
    """Help formatter that strips the '<command> [<args>] ' placeholder from
    the usage line of the top-level CLI parser.

    FIX: the override must be named ``_format_usage`` for argparse to call
    it (the auto-renamed method name made it dead code), and the four
    parameters were all given the same placeholder name (a SyntaxError).
    """

    def _format_usage(self, usage, actions, groups, prefix):
        # Build the normal usage string, then remove the placeholder token.
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('''<command> [<args>] ''', '''''')
        return usage
| 34
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for ``IFInpaintingSuperResolutionPipeline``.

    FIX: restored the names the ``PipelineTesterMixin`` contract requires —
    the auto-renamed version listed the same placeholder base class twice
    (a TypeError at class creation), reused one attribute name for all four
    class attributes, and reused one method name for every test (so only
    the last survived).
    """

    # Contract attributes read by PipelineTesterMixin.
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def get_dummy_components(self):
        """Reuse the shared super-resolution dummy component set."""
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic dummy pipeline inputs for ``device``."""
        # MPS does not support device-bound generators.
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""")
    def test_save_load_float16(self):
        # Loosened tolerance: fp16 round-tripping loses precision.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 238
| 0
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for MobileBERT (mirrors the BERT tokenizer test suite).

    NOTE(review): the identifier mangling destroyed this class — the base class,
    all class attributes, every method name, and every assignment target were
    collapsed into shared mangled names. They are reconstructed here from how
    each value is used; confirm against the upstream BERT tokenizer tests.
    """

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        # Write a tiny WordPiece vocab the slow tokenizer can load.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 567
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): ``_snake_case`` is bound twice — the logger binding is
# immediately shadowed by the pretrained-config map (mangled names; confirm).
_snake_case = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
_snake_case = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration class for GPT-BigCode (SantaCoder-style) models.

    NOTE(review): reconstructed — the original base class name was the undefined
    mangled ``_lowercase``, the three class attributes all shared one mangled
    name, ``__init__`` declared every parameter as ``_SCREAMING_SNAKE_CASE``
    (a SyntaxError), and attribute assignments wrote to a local instead of
    ``self``. Names follow the transformers GPTBigCodeConfig convention.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 567
| 1
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# The conversion relies on the post-1.0 fairseq checkpoint layout.
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
# NOTE(review): ``__a`` is bound twice — the logger is immediately shadowed by
# the sample sentence used to sanity-check the converted model (mangled names).
__a : Union[str, Any] = logging.get_logger(__name__)
__a : Tuple = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy weights from a fairseq XLM-RoBERTa-XL checkpoint into the HF model and verify outputs.

    NOTE(review): reconstructed — the identifier mangling wrote every assignment
    into a single ``__A`` temp (instead of the HF model's parameters), collapsed
    fairseq's ``fc1``/``fc2`` into ``fca``, and named the function so that the
    ``__main__`` guard's call could not resolve. Confirm against the upstream
    conversion script.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate (fairseq fc1)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output (fairseq fc2)
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results (sample sentence, see module constant above).
    input_ids = roberta.encode("Hello world! cécé herlolip").unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fix: the parser/args results were assigned to the mangled ``__a`` but
    # referenced as ``parser``/``args`` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 637
|
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix supporting +, -, *, transpose and the Sherman-Morrison update.

    NOTE(review): reconstructed — the mangling wrote every attribute/local into a
    single ``__A`` temp and gave three different methods the same name while the
    code calls them as ``validate_indicies``/``transpose``/``sherman_morrison``.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        """Create a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier: the widest element decides the column width.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True iff ``loc`` is a valid (row, col) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise.
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^-1 via the Sherman-Morrison formula, or None if singular.

        ``u`` and ``v`` must be column vectors with the same number of rows as A.
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Denominator 1 + v^T A u; zero means the update is not invertible.
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Backward-compatible alias for the original (mangled) class name.
__lowercase = Matrix
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Smoke-test the Sherman-Morrison update on the 3x3 identity.

        NOTE(review): reconstructed — the original wrote every value into one
        mangled temp and then referenced ``ainv``/``u``/``v`` (NameError).
        ``__lowercase`` is the matrix class defined above.
        """
        ainv = __lowercase(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = __lowercase(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = __lowercase(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test2()
| 637
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger (NOTE(review): mangled identifier; later reused for the
# argparse parser/args bindings below — confirm intent).
__lowerCamelCase = logging.get_logger(__name__)
def get_config(model_name):
    """Build a BitConfig for ``model_name`` with ImageNet-1k label mappings.

    NOTE(review): renamed from the mangled ``_snake_case`` so the call site in
    the conversion function resolves; locals reconstructed from the mangled
    ``_lowercase`` placeholders.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    # NOTE(review): transformers conventionally spells these kwargs
    # ``id2label``/``label2id``; the mangled ``idalabel``/``labelaid`` are kept
    # because the conversion function below reads ``config.idalabel`` — confirm.
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1_000,
        idalabel=id2label,
        labelaid=label2id,
    )

    return config
def rename_key(name):
    """Map a timm BiT parameter name to its HF ``BitForImageClassification`` name.

    Fixes: the function was defined under the mangled shared name ``_snake_case``
    while being called as ``rename_key``, and its body referenced ``name`` even
    though the parameter was mangled to ``__snake_case``.
    """
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # Everything that is not the classifier head lives under the encoder.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download the standard COCO cats test image used to verify the conversion.

    Fixes the mangled ``_lowercase`` placeholders (undefined names) in the body.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm BiT checkpoint into HF ``BitForImageClassification`` and verify outputs.

    NOTE(review): reconstructed — the function and its locals were collapsed
    into mangled shared names (``_snake_case``/``_lowercase``/``UpperCAmelCase_``)
    so none of the internal references resolved. Confirm against the upstream
    BiT conversion script.
    """
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    # NOTE(review): ``idalabel`` matches the (mangled) kwarg set in get_config.
    print("Predicted class:", model.config.idalabel[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    # Fix: the parser/args results were assigned to the mangled
    # ``__lowerCamelCase`` but referenced as ``parser``/``args`` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): ``__lowerCamelCase`` is bound twice — the logger binding is
# immediately shadowed by the pretrained-config map (mangled names; confirm).
__lowerCamelCase = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
__lowerCamelCase = {
    '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class snake_case_(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for FocalNet models.

    NOTE(review): reconstructed — the original base classes were the undefined
    mangled name ``lowercase__`` (the two bases imported above fit the usage),
    ``__init__`` declared every parameter as ``lowercase`` (a SyntaxError), and
    attribute assignments wrote to locals instead of ``self``. The mangled
    class attribute ``_lowerCamelCase = "focalnet"`` is presumed to have been
    ``model_type`` — confirm.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Stage names drive the backbone feature selection below.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 455
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A__ ( __SCREAMING_SNAKE_CASE ):
    # NOTE(review): the base class name is mangled and undefined here —
    # presumably ``SchedulerCommonTest`` (imported above); confirm.
    # The two class attributes share one mangled name, so the first (the
    # scheduler-classes tuple) is shadowed by the second — confirm intent.
    lowerCamelCase__ : Tuple =(DEISMultistepScheduler,)
    lowerCamelCase__ : Any =(("num_inference_steps", 25),)
def lowercase ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
__magic_name__ : List[Any] = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**lowerCamelCase )
return config
def lowercase ( self , lowerCamelCase=0 , **lowerCamelCase ) -> int:
"""simple docstring"""
__magic_name__ : Optional[Any] = dict(self.forward_default_kwargs )
__magic_name__ : List[str] = kwargs.pop('''num_inference_steps''' , lowerCamelCase )
__magic_name__ : int = self.dummy_sample
__magic_name__ : Any = 0.1 * sample
__magic_name__ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__magic_name__ : Optional[Any] = self.get_scheduler_config(**lowerCamelCase )
__magic_name__ : List[Any] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
__magic_name__ : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
__magic_name__ : Dict = scheduler_class.from_pretrained(lowerCamelCase )
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
__magic_name__ : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
__magic_name__ , __magic_name__ : Tuple = sample, sample
for t in range(lowerCamelCase , time_step + scheduler.config.solver_order + 1 ):
__magic_name__ : Tuple = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
__magic_name__ : Tuple = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase ( self ) -> List[str]:
"""simple docstring"""
pass
def lowercase ( self , lowerCamelCase=0 , **lowerCamelCase ) -> str:
"""simple docstring"""
__magic_name__ : Optional[Any] = dict(self.forward_default_kwargs )
__magic_name__ : Union[str, Any] = kwargs.pop('''num_inference_steps''' , lowerCamelCase )
__magic_name__ : Union[str, Any] = self.dummy_sample
__magic_name__ : Dict = 0.1 * sample
__magic_name__ : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__magic_name__ : int = self.get_scheduler_config()
__magic_name__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
__magic_name__ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
__magic_name__ : int = scheduler_class.from_pretrained(lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
__magic_name__ : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
__magic_name__ : Any = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
__magic_name__ : str = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase ( self , lowerCamelCase=None , **lowerCamelCase ) -> str:
"""simple docstring"""
if scheduler is None:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : str = self.get_scheduler_config(**lowerCamelCase )
__magic_name__ : Union[str, Any] = scheduler_class(**lowerCamelCase )
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : Optional[Any] = self.get_scheduler_config(**lowerCamelCase )
__magic_name__ : Tuple = scheduler_class(**lowerCamelCase )
__magic_name__ : Union[str, Any] = 10
__magic_name__ : Tuple = self.dummy_model()
__magic_name__ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ : str = model(lowerCamelCase , lowerCamelCase )
__magic_name__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
return sample
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
__magic_name__ : List[str] = dict(self.forward_default_kwargs )
__magic_name__ : Optional[int] = kwargs.pop('''num_inference_steps''' , lowerCamelCase )
for scheduler_class in self.scheduler_classes:
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**lowerCamelCase )
__magic_name__ : Optional[int] = self.dummy_sample
__magic_name__ : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase , '''set_timesteps''' ):
scheduler.set_timesteps(lowerCamelCase )
elif num_inference_steps is not None and not hasattr(lowerCamelCase , '''set_timesteps''' ):
__magic_name__ : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__magic_name__ : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
__magic_name__ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
__magic_name__ : int = scheduler.timesteps[5]
__magic_name__ : Any = scheduler.timesteps[6]
__magic_name__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
__magic_name__ : Any = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase ( self ) -> List[str]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
__magic_name__ : Dict = self.full_loop(scheduler=lowerCamelCase )
__magic_name__ : Any = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
__magic_name__ : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__magic_name__ : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
__magic_name__ : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
__magic_name__ : Any = DEISMultistepScheduler.from_config(scheduler.config )
__magic_name__ : List[Any] = self.full_loop(scheduler=lowerCamelCase )
__magic_name__ : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase ( self ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCamelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase , prediction_type=lowerCamelCase , sample_max_value=lowerCamelCase , algorithm_type='''deis''' , solver_order=lowerCamelCase , solver_type=lowerCamelCase , )
def lowercase ( self ) -> Dict:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase , solver_type=lowerCamelCase , prediction_type=lowerCamelCase , algorithm_type=lowerCamelCase , )
__magic_name__ : Union[str, Any] = self.full_loop(
solver_order=lowerCamelCase , solver_type=lowerCamelCase , prediction_type=lowerCamelCase , algorithm_type=lowerCamelCase , )
assert not torch.isnan(lowerCamelCase ).any(), "Samples have nan numbers"
def lowercase ( self ) -> List[str]:
"""simple docstring"""
self.check_over_configs(lower_order_final=lowerCamelCase )
self.check_over_configs(lower_order_final=lowerCamelCase )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCamelCase , time_step=0 )
def lowercase ( self ) -> Any:
"""simple docstring"""
__magic_name__ : Any = self.full_loop()
__magic_name__ : str = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ : Tuple = self.full_loop(prediction_type='''v_prediction''' )
__magic_name__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Optional[int] = self.get_scheduler_config(thresholding=lowerCamelCase , dynamic_thresholding_ratio=0 )
__magic_name__ : Optional[Any] = scheduler_class(**lowerCamelCase )
__magic_name__ : Optional[Any] = 10
__magic_name__ : int = self.dummy_model()
__magic_name__ : int = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ : List[str] = model(lowerCamelCase , lowerCamelCase )
__magic_name__ : Optional[int] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
assert sample.dtype == torch.floataa
| 154
|
import math
from numpy import inf
from scipy.integrate import quad
def lowerCAmelCase(num: float) -> float:
    """Compute the gamma function Gamma(num) by numerical quadrature.

    Gamma(num) = integral over [0, inf) of x**(num - 1) * exp(-x) dx.

    Raises:
        ValueError: if ``num`` is not strictly positive.
    """
    if num <= 0:
        raise ValueError('''math domain error''')
    # ``quad`` integrates its first argument (the integrand) over [0, inf);
    # the integrand's extra ``z`` argument is supplied through ``args``.
    # The original passed the input number itself as the integrand.
    return quad(_integrand, 0, inf, args=(num,))[0]


def _integrand(x: float, z: float) -> float:
    """Integrand of the gamma function: x**(z - 1) * exp(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 154
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ = logging.get_logger(__name__)
class UpperCamelCase__(BaseImageProcessor):
    """BLIP-style image processor: optional RGB conversion, resize, rescale and
    normalization, returning a ``BatchFeature`` of pixel values.

    NOTE(review): every parameter in the original was named
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and method bodies referenced the
    undefined ``_UpperCAmelCase``; parameter names were reconstructed from the
    attribute assignments and call sites.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        # default_to_square=True presumed from the original call shape — TODO confirm
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize ``image`` to ``size`` (a dict with ``height``/``width`` keys)."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        output_size = (size['''height'''], size['''width'''])
        # ``resize`` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` via the module-level transform."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional["PILImageResampling"] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: Optional[bool] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a list of images.

        Per-call arguments override the instance defaults set in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        # NOTE(review): precedence is (do_resize and size is None) or resample is None,
        # matching the original line — confirm the intended grouping.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        return encoded_outputs
| 713
|
'''simple docstring'''
from math import factorial
# Per-digit factorials, keyed by the digit's string form for fast lookup.
A_ = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative.
    """
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')

    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(A_[digit] for digit in str(number))


def A(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Count starting numbers below ``number_limit`` whose digit-factorial chain
    contains exactly ``chain_length`` non-repeating terms (Project Euler 74).

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not strictly positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The solver above is bound to ``A`` (the original called the undefined
    # name ``solution``).
    print(F'''{A()}''')
| 123
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_(value: list[int], weight: list[int], capacity: int):
    """Solve the fractional knapsack problem with the greedy density heuristic.

    Items may be taken fractionally, so sorting by value/weight ratio and taking
    greedily is optimal.

    Returns:
        (max_value, fractions): the best achievable value and, per item, the
        fraction of that item taken (0, 1, or a partial fraction for the last
        item that no longer fits whole).
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Sort item indices by value density, best first.  The original sorted with
    # an undefined lambda variable and used ``capacity`` as the reverse flag.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # whole item fits
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take the largest fitting fraction of this item, then stop
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 142
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Mapping from comparison-operator strings to their ``operator`` callables.
# (The original bound this dict to ``__lowercase`` while the code below read
# the undefined name ``ops``.)
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare one installed/wanted version pair, raising ImportError on mismatch."""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            f""" reinstalling {pkg}.""")
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""")


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that a pip-style ``requirement`` (e.g. ``tokenizers==0.9.4``) is met.

    ``python`` is special-cased against the running interpreter; anything else is
    looked up via ``importlib.metadata``.

    Raises:
        ValueError: if the requirement string is malformed.
        importlib.metadata.PackageNotFoundError: if the package is not installed.
        ImportError: if an installed version does not satisfy the requirement.
    """
    hint = f"""\n{hint}""" if hint is not None else ''''''

    # non-versioned check
    if re.match(r'''^[\w_\-\d]+$''', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', requirement)
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
                f""" got {requirement}""")
        pkg, want_full = match[0]
        want_range = want_full.split(''',''')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'''^([\s!=<>]{1,2})(.+)''', w)
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
                    f""" but got {requirement}""")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"""{requirement}: need one of {list(ops.keys())}, but got {op}""")

    # special case: compare against the running interpreter
    if pkg == "python":
        got_ver = '''.'''.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"""The '{requirement}' distribution was not found and is required by this application. {hint}""")

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def lowerCamelCase_(requirement):
    """``require_version`` with a core-development install hint appended."""
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement, hint)
| 142
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case(unittest.TestCase):
    """Builds a DistilBERT config and dummy inputs for the Flax tests below.

    NOTE(review): the original's two helper methods were both named ``a_`` (the
    second shadowed the first) and called the then-undefined
    ``prepare_config_and_inputs``; the conventional names are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/mask plus a matching DistilBertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Shape the prepared inputs as the (config, inputs_dict) the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


# Backward-compatible name: the model test class below instantiates the tester
# through this identifier.
FlaxDistilBertModelTester = snake_case
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Model-level Flax DistilBERT tests driven by ``FlaxModelTesterMixin``.

    NOTE(review): the original inherited from the undefined ``_UpperCamelCase``,
    named both methods ``a_`` (so ``setUp`` was shadowed) and stored the model
    list under ``__UpperCamelCase`` instead of ``all_model_classes``.
    """

    # ``all_model_classes`` is the attribute FlaxModelTesterMixin iterates over.
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test comparing real pretrained outputs to a known slice.

    NOTE(review): the original class shared the name ``snake_case`` with the two
    classes above (hiding them at module level) and its method was named ``a_``,
    so unittest never collected it.
    """

    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 621
|
"""simple docstring"""
def _greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean algorithm: greatest common divisor of ``a`` and ``b``."""
    while a != 0:
        a, b = b % a, a
    return b


def a__(a: int, m: int) -> int:
    """Modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm.

    Raises:
        ValueError: if ``a`` and ``m`` are not coprime (no inverse exists).
    """
    if _greatest_common_divisor(a, m) != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg)
    # (u1, u2, u3) and (v1, v2, v3) track Bezout coefficients for a and m.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 621
| 1
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__A = TypeVar('''T''')
# Type parameter for the cache keys (the module-level TypeVar was bound to
# ``__A`` in the original while the class referenced the undefined ``T``).
T = TypeVar("T")


class _snake_case(Generic[T]):
    """Least-recently-used cache backed by a deque (most recent at the left end).

    NOTE(review): the original referenced the undefined names ``LRUCache``,
    ``T`` and ``__lowercase`` and gave both methods the same name; restored
    here, with ``LRUCache`` provided as an alias for the demo code below.
    """

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int):
        """Set capacity to ``n``; a falsy ``n`` means effectively unbounded."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            _snake_case._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            _snake_case._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access of ``x``, evicting the least-recent entry when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == _snake_case._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cached keys, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""


# Backward-compatible public name used by the demo below.
LRUCache = _snake_case
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original assigned the instance to ``__A`` but then used the
    # undefined name ``lru_cache``; the instance is named consistently here.
    lru_cache = LRUCache(4)
    lru_cache.refer('''A''')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('''A''')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 646
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def _a(graph):
    """Return True if ``graph`` (adjacency list indexed 0..n-1) is bipartite.

    Two-colors each connected component with a DFS, then verifies that no edge
    joins two vertices of the same color.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # color vertex v with c and its unvisited neighbours with the other color
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # a same-colored edge means an odd cycle, hence not bipartite
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Backward-compatible descriptive name (the demo below calls it).
check_bipartite_dfs = _a
# Adjacency list of graph
a_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# The original printed ``check_bipartite_dfs(graph)`` — both names undefined
# in this module; use the function and constant actually defined above.
print(_a(a_))
| 296
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table: module name -> public names it provides.  The original
# rebound one throwaway variable per assignment and then passed the undefined
# ``_import_structure`` to _LazyModule; the standard structure is restored.
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_glpn'] = [
        'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GLPNForDepthEstimation',
        'GLPNLayer',
        'GLPNModel',
        'GLPNPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 201
|
'''simple docstring'''
from math import pi, sqrt
def __lowercase(num: float) -> float:
    """Recursive gamma function for positive integers and half-integers.

    Uses Gamma(n) = (n - 1) * Gamma(n - 1), with Gamma(1) = 1 and
    Gamma(1/2) = sqrt(pi).

    Raises:
        ValueError: if ``num`` is not strictly positive.
        OverflowError: if ``num`` exceeds 171.5 (result exceeds float range).
        NotImplementedError: if ``num`` is neither an integer nor a half-integer.
    """
    if num <= 0:
        raise ValueError("""math domain error""")
    if num > 171.5:
        raise OverflowError("""math range error""")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""")
    elif num == 0.5:
        # base case for half-integers (original returned sqrt(num))
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * __lowercase(num - 1)


def _test_gamma() -> None:
    """Sanity checks for the gamma implementation."""
    assert __lowercase(0.5) == sqrt(pi)
    assert __lowercase(1) == 1.0
    assert __lowercase(2) == 1.0


# Backward-compatible descriptive name (the __main__ demo below calls it).
gamma = __lowercase
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive loop: the original assigned the input to the unused name
    # ``SCREAMING_SNAKE_CASE_`` while reading the undefined ``num``.
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
        print(f"""gamma({num}) = {gamma(num)}""")
        print('\nEnter 0 to exit...')
| 201
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# Must be named `logger`: the conversion functions below call logger.info(...).
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# here we list all keys to be renamed (original name on the left, our name on the right)
# Must be named `rename_keys`: it is appended to below and iterated in the
# conversion function; the original bound the empty list to a different name.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    for old_suffix, new_suffix in [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "final_layer_norm.weight"),
        ("norm2.bias", "final_layer_norm.bias"),
    ]:
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.{old_suffix}", f"encoder.layers.{i}.{new_suffix}")
        )
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    for old_suffix, new_suffix in [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("multihead_attn.out_proj.weight", "encoder_attn.out_proj.weight"),
        ("multihead_attn.out_proj.bias", "encoder_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "encoder_attn_layer_norm.weight"),
        ("norm2.bias", "encoder_attn_layer_norm.bias"),
        ("norm3.weight", "final_layer_norm.weight"),
        ("norm3.bias", "final_layer_norm.bias"),
    ]:
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.{old_suffix}", f"decoder.layers.{i}.{new_suffix}")
        )
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
        ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
    ]
)
# Backward-compatible alias for the original (mangled) binding.
lowerCamelCase__ = rename_keys
def rename_key(state_dict, old, new):
    """Rename state_dict[old] to state_dict[new] in place.

    The original had three identical parameter names (a SyntaxError) and
    assigned the popped value to a local instead of storing it under the new
    key, so the entry was silently dropped.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a copy of state_dict with timm backbone keys remapped.

    Keys under "backbone.0.body" are moved to "backbone.conv_encoder.model";
    everything else is copied verbatim. Insertion order is preserved.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split fused attention in_proj parameters into separate q/k/v entries.

    PyTorch's MultiheadAttention stores query/key/value as a single 768x256
    matrix (hidden size 256); HF models expect separate q_proj/k_proj/v_proj.
    Mutates `state_dict` in place.

    NOTE(review): the target key names were lost in the original (every
    assignment collapsed onto one local); they are reconstructed from the
    standard HF DETR/Table-Transformer conversion pattern — verify against the
    upstream script.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (a single fused matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (a bit more complex because it also includes cross-attention)
    for i in range(6):
        # self-attention
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # cross-attention ("multihead_attn" in the original checkpoint)
        in_proj_weight_cross_attn = state_dict.pop(
            f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""")
        in_proj_bias_cross_attn = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize a PIL image so its longest side matches the checkpoint's target.

    Detection checkpoints use a max side of 800 px, structure-recognition
    checkpoints 1000 px; aspect ratio is preserved.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format.

    Downloads the checkpoint, renames/splits its weights, verifies the
    converted model against hard-coded expected outputs on an example image,
    then optionally saves to disk and/or pushes to the hub.

    NOTE(review): local variable names were lost in the original; they are
    reconstructed from how each value is used below.
    """
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: prepend a prefix to each base-model key, as the head models
    # keep the base model under a `model.` attribute
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # The original bound the parser and parsed args to a mangled name while
    # still calling parser.add_argument / args.<attr>, which raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 524
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    """Minimal iterable-style dataset that replays a fixed sequence.

    Renamed from the mangled `UpperCamelCase` (base placeholder `snake_case__`
    was undefined): every use in this file is `DummyIterableDataset`, and the
    only base in scope that makes it iterable-style is IterableDataset.
    """

    def __init__(self, data):
        # `data` is typically a 1-D tensor of indices (see create_dataloader).
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


# Backward-compatible alias for the original class name.
UpperCamelCase = DummyIterableDataset
def create_accelerator(even_batches=True):
    """Create an Accelerator for this 2-process test script.

    Asserts exactly two processes are available, as the expected batch sizes
    below are hard-coded for that layout.
    """
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator, dataset_size, batch_size, iterable=False):
    """Build a DataLoader over range(dataset_size) and prepare it with `accelerator`.

    When `iterable` is True, wraps the data in an iterable-style dataset
    (no batch_sampler) instead of a map-style TensorDataset.
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator,
    dataset_size,
    batch_size,
    process_0_expected_batch_sizes,
    process_1_expected_batch_sizes,
):
    """Assert that each process sees the expected per-batch sizes."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    """With even_batches (the default), every process sees identical batch shapes."""
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    """With even_batches=False, the last process may get fewer/smaller batches."""
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    """join_uneven_inputs lets DDP processes iterate different batch counts."""
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    """Joining should warn when the distributed type is not multi-GPU DDP."""
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
    # NOTE(review): the warning category placeholder was mangled; UserWarning is
    # the assumed category — confirm against accelerate's join_uneven_inputs.
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    """even_batches can be overridden inside the join context and is restored after."""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    """Overriding even_batches must not fail when an iterable dataloader was also prepared."""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    # prepared but unused: its mere presence exercises the mixed-type path
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    """Overriding even_batches warns when an iterable (map-style-less) dataloader exists."""
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
    # NOTE(review): the warning category placeholder was mangled; UserWarning is
    # the assumed category — confirm against accelerate's join_uneven_inputs.
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    """Run every even_batches/join test, announcing each on the main process."""
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    # temporarily force a non-DDP distributed type, then restore it
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 524
| 1
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel's weights to a TF1 checkpoint in `ckpt_dir`.

    Key mapping: PyTorch names are rewritten to the original TF naming scheme
    (dots -> slashes, LayerNorm weight/bias -> gamma/beta, weight -> kernel),
    and 2-D projection matrices are transposed to TF layout.

    NOTE(review): uses TF1-style graph APIs (tf.Session, tf.get_variable);
    requires tf.compat.v1 semantics.
    """
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Apply every rename rule in order; "." -> "/" must run where it sits.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('''-''', '''_''') + '''.ckpt'''))
def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and export it to TF.

    `raw_args` may be an explicit argv list (defaults to sys.argv).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, required=True, help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''', type=str, default=None, required=False, help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''', type=str, required=True, help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''', type=str, required=True, help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 358
|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
# All three globals were bound to the same name `A` in the original, so only
# the last survived; the rest of the file reads them as logger / model_dict /
# tokenizer_dict.
logger = logging.getLogger(__name__)
model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args(args=None):
    """Parse CLI arguments for the BART -> ONNX export script.

    Generalized to accept an explicit argv list (`args`), defaulting to
    sys.argv, which also makes the parser unit-testable.
    """
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''')
    parser.add_argument(
        '''--validation_file''', type=str, default=None, help='''A csv or a json file containing the validation data.''')
    parser.add_argument(
        '''--max_length''', type=int, default=5, help='''The maximum total input sequence length after tokenization.''', )
    parser.add_argument(
        '''--num_beams''', type=int, default=None, help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ), )
    parser.add_argument(
        '''--model_name_or_path''', type=str, help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=True, )
    parser.add_argument(
        '''--config_name''', type=str, default=None, help='''Pretrained config name or path if not the same as model_name''', )
    parser.add_argument(
        '''--device''', type=str, default='''cpu''', help='''Device where the model will be run''', )
    parser.add_argument('''--output_file_path''', type=str, default=None, help='''Where to store the final ONNX file.''')
    parsed_args = parser.parse_args(args)
    return parsed_args
def snake_case_ (_a : Tuple , _a : str="cpu" ):
UpperCAmelCase = model_dict[model_name].from_pretrained(_a ).to(_a )
UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_a )
if model_name in ["facebook/bart-base"]:
UpperCAmelCase = 0
UpperCAmelCase = None
UpperCAmelCase = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Script the BART beam-search generator, export it to ONNX, and verify
    that ONNX Runtime reproduces the PyTorch generate() output.

    NOTE(review): local variable names were lost in the original and are
    reconstructed from usage; `early_stopping=True` is assumed — confirm
    against the upstream run_onnx_exporter script.
    """
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_0_2_4, return_tensors='''pt''').to(model.device)
        summary_ids = model.generate(
            inputs['''input_ids'''],
            attention_mask=inputs['''attention_mask'''],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=1_4,
            input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''],
            output_names=['''output_ids'''],
            dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            },
            example_outputs=summary_ids,
        )
        logger.info('''Model exported to {}'''.format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams),
                '''max_length''': np.array(max_length),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3)
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''')
        logger.info('''Success.''')
def main():
    """Entry point: parse args, load model/tokenizer, export and validate ONNX."""
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''')
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 358
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__(ProcessorMixin):
    """Processor wrapping a CLIP image processor and an XLM-Roberta tokenizer.

    The original declared `__call__`/`__init__` with duplicate parameter names
    (a SyntaxError) and lost the `encoding`/`image_features` assignment
    targets; both are restored here from how the values are used.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')
        # fall back to the deprecated kwarg if no image_processor was given
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; returns a BatchEncoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of both components' input names, de-duplicated, order preserved
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 253
|
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
__lowerCAmelCase = True
from torch.cuda.amp import autocast
__lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase:
    """Arguments for which model to pre-train and how to configure it.

    NOTE(review): field names and defaults were lost in the original (every
    field bound to one placeholder, with no annotations, which breaks
    @dataclass); they are reconstructed from the metadata help strings —
    confirm against the upstream wav2vec2 pretraining script.
    """

    # Required: which checkpoint/identifier to start from.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."})
def __SCREAMING_SNAKE_CASE(model_args, training_args):
    """Configure the module logger for distributed training.

    Verbosity: DEBUG when verbose_logging is requested, INFO on the main
    process, WARNING on every other rank. (The original declared two
    parameters with the same name, a SyntaxError.)
    """
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class _lowerCAmelCase:
    """Arguments for which dataset to pre-train on and how to preprocess it.

    NOTE(review): field names/defaults reconstructed from the metadata help
    strings (the original had no annotations, breaking @dataclass) — confirm
    against the upstream wav2vec2 pretraining script.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Data collator that pads input values to the longest sequence in the
    batch and samples the mask-time indices used for Wav2Vec2 pre-training.

    NOTE(review): field and local names restored — the obfuscation had
    collapsed every assignment target onto one identifier, leaving the names
    used in the body (``batch``, ``batch_size``, ``attention_mask``, ...)
    undefined. Class name matches the reference in ``main()``.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        # NOTE(review): if the batch carries no attention mask this falls
        # through with `attention_mask` unbound — the feature extractor is
        # assumed to always return one here.
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch


# Keep the obfuscated name bound so any stray reference still resolves.
_lowerCAmelCase = DataCollatorForWavaVecaPretraining
class WavaVecaPreTrainer(Trainer):
    """Trainer subclass that decays the Gumbel-softmax temperature after each
    training step.

    NOTE(review): base class and identifiers restored — the original base was
    the undefined ``__snake_case``, ``__init__`` had duplicate parameter names
    (a SyntaxError), and ``self.num_update_step`` was never actually assigned.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Run one forward/backward step and decay the Gumbel temperature.

        NOTE(review): method name restored to ``training_step`` so that the
        base ``Trainer`` actually invokes this override.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                # Normalize the summed loss by the number of masked timesteps.
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()


# Keep the obfuscated name bound so any stray reference still resolves.
_lowerCAmelCase = WavaVecaPreTrainer
def main():
    """Parse CLI arguments, build dataset/model/collator and run pre-training.

    NOTE(review): local names restored from their use sites — the obfuscation
    had collapsed every assignment target onto one identifier, leaving
    ``model_args``, ``data_args``, ``datasets``, ``feature_extractor`` etc.
    undefined. The function is named ``main`` because the ``__main__`` guard
    below calls ``main()``.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    # NOTE(review): the obfuscation destroyed the `do_normalize` argument;
    # True matches the comment above — confirm against the upstream script.
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
            """ ``config.feat_extract_norm='layer'"""
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 585
| 0
|
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
# NOTE(review): these module-level constants were all renamed to the same
# identifier (`SCREAMING_SNAKE_CASE__`), so each assignment overwrites the
# previous one, and the original comments are shifted one line off from the
# values they describe (each comment below actually describes the line ABOVE
# it). Restore distinct names (model id, revision ids, config sha, ...)
# before relying on them.
SCREAMING_SNAKE_CASE__ : int =DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
SCREAMING_SNAKE_CASE__ : int ='main'
# Default branch name
SCREAMING_SNAKE_CASE__ : List[str] ='f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
SCREAMING_SNAKE_CASE__ : Dict ='aaaaaaa'
# This commit does not exist, so we should 404.
SCREAMING_SNAKE_CASE__ : Optional[int] ='d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
SCREAMING_SNAKE_CASE__ : Any ='4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
    """Context manager printing an English greeting on entry and exit.

    Name restored from the obfuscated ``UpperCamelCase``: the tests below call
    ``context_en()``, and a second definition with the same obfuscated name
    would otherwise shadow this one.
    """
    print('''Welcome!''')
    yield
    print('''Bye!''')
@contextlib.contextmanager
def context_fr():
    """Context manager printing a French greeting on entry and exit.

    Name restored from the obfuscated ``UpperCamelCase``: the tests below call
    ``context_fr()``.
    """
    print('''Bonjour!''')
    yield
    print('''Au revoir!''')
class _UpperCAmelCase ( unittest.TestCase ):
    """Sanity checks that the `transformers` package exposes a module spec."""
    def a__ ( self ) -> Tuple:
        """Assert the import machinery can locate `transformers` dynamically."""
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('''transformers''' ) is not None
class GenericUtilTests(unittest.TestCase):
    """Tests for ``ContextManagers`` and ``find_labels``.

    NOTE(review): names restored — the class shared its obfuscated name with
    the import-sanity test class above (shadowing it), every method was named
    ``a__`` (so only the last one survived), and the bodies referenced
    ``mock_stdout`` and the Bert model classes which no longer appeared in the
    signatures / arguments.
    """

    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('''Transformers are awesome!''')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), '''Transformers are awesome!\n''')

    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('''Transformers are awesome!''')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), '''Welcome!\nTransformers are awesome!\nBye!\n''')

    @unittest.mock.patch('''sys.stdout''', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('''Transformers are awesome!''')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''')

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['''labels'''])
        self.assertEqual(find_labels(BertForPreTraining), ['''labels''', '''next_sentence_label'''])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['''start_positions''', '''end_positions'''])

        # A subclass inherits its parent's label argument names.
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['''labels'''])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['''labels'''])
        self.assertEqual(find_labels(TFBertForPreTraining), ['''labels''', '''next_sentence_label'''])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['''start_positions''', '''end_positions'''])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['''labels'''])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 558
|
"""simple docstring"""
import re
from filelock import FileLock
# Detect whether nltk is importable; the sentence splitter below requires it.
# NOTE(review): flag name restored to NLTK_AVAILABLE — the obfuscated
# assignment target left the name referenced below undefined.
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Download the punkt sentence tokenizer under a file lock so concurrent
    # processes don't race on the download.
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
    """Split the text into sentences (via nltk) joined by newlines.

    Pegasus emits ``<n>`` as its newline token; strip it before tokenizing.
    (Bug fix: the original discarded the ``re.sub`` result.)
    """
    SCREAMING_SNAKE_CASE_ = re.sub('''<n>''' , '''''' , SCREAMING_SNAKE_CASE_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 558
| 1
|
# Package version string. Restored to the conventional dunder so that
# `accelerate.__version__` works; the obfuscated binding is kept for safety.
__version__ = '''0.21.0'''
lowerCamelCase_ = __version__
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 513
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    '''Assert that gradients of two models are (de)synchronised as expected.

    When `did_step` is False the gradients must differ; when True they must
    match. (Name/parameters restored: the obfuscated def gave all four
    parameters the same name — a SyntaxError — and the call sites below use
    `check_model_parameters`.)
    '''
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    '''Run one forward pass + backward on MSE loss.

    With `do_backward=True` the backward goes through `accelerator.backward`;
    otherwise the loss is scaled by the accumulation steps and backpropagated
    directly. (Names restored from the body/call sites; duplicate obfuscated
    parameter names were a SyntaxError.)
    '''
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    '''Build a seeded regression model, a deep copy of it, a dataloader, and
    (optionally) optimizers + LR schedulers, then `accelerator.prepare` the
    DDP side. Returns (model, ddp_model, dataloader) or the 7-tuple with
    optimizers/schedulers when `sched=True`.
    (Names restored from the call sites / unpacking below.)
    '''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        scheduler = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    '''On a single device `Accelerator.no_sync` is a no-op, so the plain model
    and the prepared model must always end up with identical gradients.
    (Names restored; `main()` below calls `test_noop_sync(accelerator)`.)
    '''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    '''Under multi-process training, `no_sync` must actually skip the gradient
    all-reduce: gradients diverge on the no-sync iterations and match on the
    sync iterations. (Names restored; called from `main()` below.)
    '''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    '''Check that `Accelerator.accumulate` syncs gradients only every second
    iteration (gradient_accumulation_steps=2) or at the end of the dataloader.
    (Names restored; called from `main()` below.)
    '''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    '''Check that `Accelerator.accumulate` keeps the wrapped optimizer and LR
    scheduler in lock-step with a manually-stepped reference pair.
    (Names restored; called from `main()` below.)
    '''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process mirrors what `prepare` does.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    '''Check `GradientState.active_dataloader` tracking when a second
    dataloader is iterated inside the first one's loop.
    (Names restored; called from `main()` below.)
    '''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Iterate a nested dataloader: it becomes the active one.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''Dispatch the gradient-accumulation test suite depending on the
    distributed setup. (Name restored: the `__main__` guard calls `main()`.)
    '''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """,
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""", """2.0""") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """,
                """`split_batches=False`, `dispatch_batches=False`**""",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """,
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    '''Entry point for spawned workers (accepts the process index and
    delegates to `main`).

    NOTE(review): name restored from accelerate's usual test-script layout —
    nothing in view calls it; confirm against the upstream file.
    '''
    main()


if __name__ == "__main__":
    main()
| 513
| 1
|
"""simple docstring"""
class RadixNode:
    """A node of a radix (compressed prefix) tree.

    Names restored: the obfuscation had collapsed every attribute/method
    assignment onto one identifier, leaving `self.nodes`, `self.is_leaf`,
    `self.prefix` and all method names (`match`, `insert`, `find`, `delete`,
    `print_tree`, ...) referenced in the bodies but never defined. The class
    name matches the `RadixNode()` calls in the test helpers below.
    """

    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]):
        """Insert every word of *words* into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        """Insert *word* below this node, splitting prefixes as needed."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str):
        """Return True iff *word* is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str):
        """Remove *word* from the tree; return True iff it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0):
        """Print the subtree rooted here, one node per line, dash-indented."""
        if self.prefix != "":
            print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''')
        for value in self.nodes.values():
            value.print_tree(height + 1)


# Keep the obfuscated name bound so any stray reference still resolves.
SCREAMING_SNAKE_CASE_ = RadixNode
def test_trie():
    '''Exercise insert/find/delete on a small word set; return True on success.

    (Name restored: the helper below asserts `test_trie()`, and all three
    functions in this section shared one obfuscated name, shadowing each
    other.)
    '''
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find('''bandanas''')
    assert not root.find('''apps''')
    root.delete('''all''')
    assert not root.find('''all''')
    root.delete('''banana''')
    assert not root.find('''banana''')
    assert root.find('''bananas''')
    return True
def pytests():
    '''Pytest entry point that runs the correctness check.

    NOTE(review): name restored from the conventional layout of this module —
    nothing in view calls it; confirm against the upstream file.
    '''
    assert test_trie()
def main():
    '''Build a demo tree and print it. (Name restored: the guard calls
    `main()`, which was otherwise undefined.)
    '''
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words)

    print('''Words:''', words)
    print('''Tree:''')
    root.print_tree()


if __name__ == "__main__":
    main()
| 363
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
# Module-level logger (conventional name restored; the obfuscated binding is
# kept in case anything still references it).
logger = logging.get_logger(__name__)
_UpperCamelCase = logger
def shape_list(tensor: "Union[tf.Tensor, np.ndarray]") -> "List[int]":
    '''Return the shape of *tensor*, mixing static dims where known with
    dynamic (runtime) dims where the static shape is None.

    (Name/parameter restored: the body referenced `tensor` which never
    appeared in the obfuscated signature, and `functional_layernorm` below
    already calls `shape_list`.)
    '''
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        # Fully-unknown static shape: fall back to the dynamic shape tensor.
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: "tf.Tensor", axis: "Optional[int]" = None, name: "Optional[str]" = None) -> "tf.Tensor":
    '''Wrapper around tf.nn.softmax that adds a tiny constant to the logits.

    (Parameters restored: the obfuscated def repeated one name three times —
    a SyntaxError — while the body referenced `logits`.)
    '''
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def lowerCAmelCase_(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Functional layer normalization over a single axis, in TensorFlow.

    Bug fix: the mangled signature repeated one parameter name five times
    (SyntaxError), and the body read unbound names (`mean`, `variance`,
    `shape`, `axis`); parameters and locals are restored consistently.

    Args:
        inputs: tensor to normalize.
        weight: 1-D scale tensor (one entry per normalized-axis element).
        bias: 1-D offset tensor.
        epsilon: variance epsilon for numerical stability.
        axis: the single axis to normalize over.

    Raises:
        NotImplementedError: if weight/bias are not 1-D or axis is not an int.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.')
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1
        # dimensions on every dimension except axis
        shape = [1] * inputs.shape.rank
        # NOTE(review): `shape_list` is not defined under that name in this
        # chunk (the helper above was renamed by the obfuscation) — confirm.
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def lowerCAmelCase_(input, start_dim=0, end_dim=-1):
    """Flatten dims ``[start_dim, end_dim]`` of a TF tensor (torch.flatten analogue).

    Bug fix: the mangled signature repeated one parameter name three times
    (SyntaxError); parameters are restored to the names the body already used.

    Args:
        input: tensor to flatten.
        start_dim: first dimension to merge (may be negative).
        end_dim: last dimension to merge, inclusive (may be negative).

    Returns:
        Tensor with the selected dimensions collapsed into one.
    """
    # Normalize negative dims against the static rank.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        # nothing to merge
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def lowerCAmelCase_(encoder_attention_mask):
    """Invert a 2-D/3-D encoder attention mask into an additive 4-D mask.

    1-entries (attend) become 0; 0-entries (ignore) become ``dtype.min`` so
    they are suppressed when added to attention scores.

    Bug fix: the mangled version assigned to throwaway names while the body
    read `encoder_attention_mask` / `encoder_extended_attention_mask`,
    raising NameError; the bindings are restored.

    Args:
        encoder_attention_mask: ``tf.Tensor`` (or array convertible to one)
            of rank 2 ``[batch, seq]`` or rank 3 ``[batch, q_seq, k_seq]``.

    Returns:
        Rank-4 additive mask broadcastable over attention heads.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def lowerCAmelCase_(tensor, embed_dim, tensor_name="input_ids"):
    """Assert every id in `tensor` is strictly below the embedding size.

    Bug fix: the mangled signature repeated one parameter name three times
    (SyntaxError); parameters are restored to the names the body already used
    (`tensor`, `embed_dim`, `tensor_name`).

    Args:
        tensor: integer id tensor to validate.
        embed_dim: size of the embedding layer's input dimension.
        tensor_name: label used in the failure message.

    Raises:
        tf.errors.InvalidArgumentError: if any id is out of bounds.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f'The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding '
            f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
        ),
    )
def lowerCAmelCase_(group, name, data):
    """Save string attributes on an HDF5 group, chunking oversized payloads.

    HDF5 limits each object header to 64512 bytes, so attribute lists that
    would exceed it are split across numbered attributes ("name0", "name1", ...).

    Bug fixes: the mangled signature repeated one parameter name three times
    (SyntaxError); the limit/chunk locals were never bound; and the chunked
    values were never actually written to ``group.attrs``.

    Args:
        group: h5py-style group exposing an ``attrs`` mapping.
        name: base attribute name.
        data: list of strings to store.

    Raises:
        RuntimeError: if any single item is larger than the header limit.
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            f'bytes: {bad_attributes}')
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        # store each chunk under a numbered attribute name: "name0", "name1", ...
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def lowerCAmelCase_(group, name):
    """Load (possibly chunked) string attributes from an HDF5 group.

    Inverse of the chunked-save helper above: if `name` is present it is read
    directly, otherwise numbered chunks ("name0", "name1", ...) are gathered
    until one is missing. Byte values are decoded as UTF-8.

    Bug fix: the mangled version repeated one parameter name (SyntaxError)
    and never bound `data`/`chunk_id`; parameters and locals are restored.

    Args:
        group: h5py-style group exposing an ``attrs`` mapping.
        name: base attribute name.

    Returns:
        List of decoded strings (empty if no matching attributes exist).
    """
    if name in group.attrs:
        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        # gather numbered chunks "name0", "name1", ... until one is missing
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])
            chunk_id += 1
    return data
def lowerCAmelCase_(data):
    """Expand every rank-1 ``tf.Tensor`` in a nested structure to rank 2.

    Bug fix: the inner helper's parameter was mangled while its body read `t`,
    raising NameError; the parameter name is restored.

    Args:
        data: arbitrarily nested structure (lists/tuples/dicts) of tensors.

    Returns:
        The same structure with each 1-D tensor given a trailing axis.
    """
    def _expand_single_ad_tensor(t):
        # only 1-D tf.Tensors are expanded; everything else passes through
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor, data)
| 363
| 1
|
import re
import string
import numpy as np
import datasets
UpperCAmelCase_ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
UpperCAmelCase_ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> 
print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
UpperCAmelCase_ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __UpperCamelCase(datasets.Metric):
    """`exact_match` metric: percentage of predictions equal to their references,
    after optionally stripping ignored regexes, case, punctuation, and digits.

    Bug fixes vs. the mangled original: both methods were renamed to the
    `_info`/`_compute` hooks that ``datasets.Metric`` dispatches to; the
    compute signature repeated one parameter name five times (SyntaxError);
    and every intermediate result was discarded into `_UpperCAmelCase`.
    """

    # NOTE(review): the decorator references _DESCRIPTION/_KWARGS_DESCRIPTION/_CITATION,
    # but the module constants above were mangled to `UpperCAmelCase_` — confirm names.

    def _info(self):
        # datasets.Metric subclasses describe their inputs/outputs via _info().
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate}`` with rate in [0.0, 100.0]."""
        if regexes_to_ignore is not None:
            # strip every ignored pattern from both sides before comparing
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 32
|
from __future__ import annotations
def kmp(pattern, text):
    """Knuth–Morris–Pratt substring search: True iff `pattern` occurs in `text`.

    Bug fix: the mangled signature repeated the parameter name `UpperCAmelCase`
    twice, a SyntaxError. The function is renamed to `kmp` to match this
    module's own call sites (`assert kmp(pattern, texta)` in the __main__
    block); the mangled name `_UpperCAmelCase` was shadowed by the next
    definition and therefore unreachable anyway.

    Args:
        pattern: the needle string.
        text: the haystack string.

    Returns:
        True if `pattern` is a substring of `text`, else False.
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern):
    """Compute the KMP failure array for `pattern`.

    ``failure[k]`` is the length of the longest proper prefix of
    ``pattern[:k+1]`` that is also a suffix of it.

    Bug fix: the mangled body read the unbound names `pattern`/`failure`/`i`/`j`.
    The function is renamed to `get_failure_array` to match this module's own
    call sites (the search above and the __main__ self-tests below).

    Args:
        pattern: the pattern string.

    Returns:
        List of ints, one per character of `pattern`.
    """
    failure = [0]
    i = 0  # length of the current matched prefix
    j = 1  # position being processed
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # fall back to the longest shorter border and retry
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Self-tests for the KMP implementation above.
    # NOTE(review): the assignments below were mangled — every value is bound
    # to `__UpperCamelCase`, while the asserts read `pattern`, `texta`, `text`,
    # which are never defined; presumably pattern/texta/textb etc. Also, both
    # functions above were renamed to `_UpperCAmelCase`, so the `kmp` /
    # `get_failure_array` calls do not resolve in this chunk — confirm.
    # Test 1)
    __UpperCamelCase : List[str] = 'abc1abc12'
    __UpperCamelCase : Dict = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    __UpperCamelCase : Optional[int] = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, texta) and not kmp(pattern, texta)
    # Test 2)
    __UpperCamelCase : Any = 'ABABX'
    __UpperCamelCase : List[str] = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    __UpperCamelCase : List[Any] = 'AAAB'
    __UpperCamelCase : Any = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    __UpperCamelCase : Union[str, Any] = 'abcdabcy'
    __UpperCamelCase : Tuple = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    __UpperCamelCase : Union[str, Any] = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 519
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class UpperCAmelCase_(unittest.TestCase):
    """Holds the configuration used to build a ConditionalDetrImageProcessor in
    tests and computes the (height, width) the processor is expected to emit.

    Bug fixes vs. the mangled original: the ``__init__`` signature repeated one
    parameter name twelve times (SyntaxError); every attribute assignment was
    discarded into `__lowercase` instead of being stored on ``self``; and the
    method names are restored to the ones the test class below actually calls
    (`prepare_image_processor_dict`, `get_expected_values`).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs dict for constructing the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For a single image the shorter side is scaled to
        ``size["shortest_edge"]`` keeping the aspect ratio; for a batch the
        per-image expectations are computed and the maxima taken (padding).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # tensors/arrays are channel-first: (C, H, W)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_(snake_case, unittest.TestCase):
    """Tests for ConditionalDetrImageProcessor with PIL / NumPy / PyTorch
    inputs, plus slow integration tests against COCO annotation fixtures.

    Bug fixes vs. the mangled original: every method was collapsed to the
    same name `_lowerCamelCase` (shadowing all but the last); method and
    attribute names are restored to the ones the bodies and the mixin
    actually reference, and locals discarded into `__lowercase` are rebound.
    """

    # NOTE(review): the `snake_case` base is a mangled name; given the imports it is
    # presumably ImageProcessingSavingTestMixin — confirm.
    # The `self.image_processing_class(...)` calls below require this class
    # attribute to be named `image_processing_class`.
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): the tester class above was mangled to `UpperCAmelCase_`;
        # this keeps the original `ConditionalDetrImageProcessingTester` name — confirm.
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        # overriding size / max_size / pad_and_return_pixel_mask must be reflected
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, numpify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(
            self.image_processor_tester, equal_resolution=False, torchify=True
        )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = ConditionalDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 523
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class UpperCAmelCase_(snake_case):
    """Tokenizer wrapper that expands a placeholder token into several concrete
    tokens ("multi-vector" textual-inversion tokens) before tokenizing.

    Bug fixes vs. the mangled original: `self.token_map`, `num_added_tokens`,
    `output`, `tokens` and `text` were discarded into throwaway names, and the
    helper method names are restored to the ones the bodies actually call
    (`try_adding_tokens`, `replace_placeholder_tokens_in_text`, `encode`).
    """

    # NOTE(review): the `snake_case` base is a mangled name; the import above
    # suggests CLIPTokenizer — confirm.

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder token -> list of the concrete tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add one token to the vocabulary, failing loudly on duplicates."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ''' `placeholder_token` that is not already in the tokenizer.''')

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register `placeholder_token`, expanding to `num_vec_per_token` tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}keep placeholder tokens independent""")
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder occurring in `text` (str or list)."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # optionally load only a leading fraction of the expansion
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    # shuffle a copy so the stored mapping stays ordered
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ' '.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )
| 523
| 1
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""")
@patch("""builtins.open""")
def UpperCamelCase(file, sock):
    """Verify `send_file` drives the socket/file protocol exactly once each.

    The decorators inject mocks bottom-up: the innermost `@patch` (builtins.open)
    supplies the first argument (`file`), the outer one supplies `sock`.

    Bug fixes vs. the mangled original: the signature repeated `lowercase_`
    twice (SyntaxError), and every mock wiring was discarded into `A__`
    instead of configuring the mocks (`sock.return_value.accept.return_value`,
    the file read side effect) and binding `conn`.
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])  # one chunk of data, then EOF
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="""mytext.txt""", testing=True)
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 456
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
# NOTE(review): every constant below is bound to the same mangled name `A_`, so each
# assignment clobbers the previous one (json indent -> best-hparams dict -> org map);
# presumably these were distinct names (e.g. json_indent, best_score_hparams,
# org_names) — confirm against the truncated conversion function below.
A_ : List[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : Tuple = {
    # fairseq:
    'wmt19-ru-en': {'length_penalty': 1.1},
    'wmt19-en-ru': {'length_penalty': 1.15},
    'wmt19-en-de': {'length_penalty': 1.0},
    'wmt19-de-en': {'length_penalty': 1.1},
    # allenai:
    'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
    'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
    'wmt16-en-de-12-1': {'length_penalty': 0.8},
    'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
    'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
A_ : Optional[int] = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # NOTE(review): rebinding `A_` discards the mapping being built; presumably
    # this was `org_names[m] = 'facebook'` — confirm downstream use.
    A_ : Optional[int] = 'facebook'
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    # NOTE(review): same issue — presumably `org_names[m] = 'allenai'`.
    A_ : Optional[int] = 'allenai'
def UpperCamelCase(d):
    """Rewrite fairseq BPE dictionary keys to the HF convention.

    (1) remove the word-breaking suffix "@@"; (2) append the word-ending
    symbol "</w>" to keys that were not broken up; (3) restore the four
    special tokens unchanged.
    e.g.: {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}

    Bug fixes vs. the mangled original: `re.sub` was applied to the whole
    dict argument instead of each key, and `da`/`keep_keys` were never bound.

    Args:
        d: mapping of BPE symbol -> index.

    Returns:
        New dict with rewritten keys.
    """
    da = dict(
        (re.sub(r"""@@$""", """""", k), v) if k.endswith("""@@""") else (re.sub(r"""$""", """</w>""", k), v)
        for k, v in d.items()
    )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens: drop the '</w>'-suffixed variant, keep the raw key
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq wmt19/wmt16 transformer checkpoint into an HF FSMT dump.

    Writes vocab files, merges, model/tokenizer configs and the weights into
    ``pytorch_dump_folder_path``. ``fsmt_checkpoint_path`` must point at the
    checkpoint file inside the fairseq dump dir (which also holds dicts, bpecodes, etc.).
    """
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok (strict=False because embed tokens/positions were dropped above)
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 456
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowerCamelCase = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class snake_case_(_a):
    """Tool that translates text between languages with the NLLB-200 model.

    Inputs are three strings: the text, the source language and the target
    language (both given as plain-English names, looked up in ``lang_to_code``).
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    # plain-English language name -> NLLB language code (table defined above)
    lang_to_code = lowerCamelCase

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Validate the language names and build the model inputs."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang )

    def forward(self, inputs):
        """Run generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 102
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: module name -> public names it provides.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

# Modeling code is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

# TF vision models are only exposed when TensorFlow is installed.
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102
| 1
|
def binary_insertion_sort(collection: list) -> list:
    """Sort *collection* in place using insertion sort with a binary search
    for the insertion point, and return it.

    Stable; O(n^2) element moves but O(n log n) comparisons.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary-search the insertion index for `val` in the sorted prefix [0, i)
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right to open the slot at `low`
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    # read a comma-separated list of integers from stdin and print them sorted
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 658
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Directories holding the flax example scripts, made importable below.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
def _A ( ):
UpperCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase :Dict = parser.parse_args()
return args.f
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]="eval" ):
UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
raise ValueError(F'''can\'t find {path}''' )
# echo test logs to stdout so pytest captures them
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_(TestCasePlus):
    """End-to-end smoke tests for the Flax example scripts on tiny fixtures.

    Each test builds a CLI argument list, patches ``sys.argv`` and runs the
    script's ``main()``, then checks the metrics written to the output dir.
    """

    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 658
| 1
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __lowerCAmelCase(unittest.TestCase):
    """Tests for the zero-shot-audio-classification pipeline (CLAP models)."""

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 704
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : int = tmp_path / """cache"""
_UpperCAmelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : Tuple = JsonDatasetReader(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = tmp_path / """cache"""
_UpperCAmelCase : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase : int = features.copy() if features else default_expected_features
_UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Any = JsonDatasetReader(lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_json_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order from the file (col_3, col_1, col_2) is preserved by the reader."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features_order(jsonl_312_path, tmp_path):
    """Requesting features in a different order than the file still yields the file's column order."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """`split=` is propagated to the resulting dataset; None defaults to "train"."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    # Parenthesized: without parentheses the conditional made the split=None case a
    # vacuous `assert "train"`.
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """The reader accepts both a single path string and a list of paths."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert that every requested split of `dataset_dict` matches the canonical fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """A {split: path} mapping produces a DatasetDict, in-memory or memory-mapped."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` are applied to every split of the DatasetDict."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Split names in the input mapping become the DatasetDict's split names."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Deserialize a single JSON document from an open file-like `buffer`."""
    return json.load(buffer)
def load_json_lines(buffer):
    """Deserialize a JSON-lines stream: one JSON document per line of `buffer`.

    Fixes the original, which called `json.loads` on the whole buffer object
    instead of on each `line`.
    """
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    """Round-trip tests for JsonDatasetWriter: lines/orient modes, multiprocessing,
    invalid num_proc, and on-disk compression. Relies on the module-level `dataset`
    fixture (10-row dataset with tokens/labels/answers/id columns)."""

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        """Writing with/without `lines` produces content the matching loader can parse."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        """Each pandas-style `orient` yields the expected container shape and keys."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        """Same as the lines test, but exercising the num_proc=2 writer path."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        """Same as the orient test, but exercising the num_proc=2 writer path."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        """A non-positive num_proc is rejected at construction time."""
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        """Compressed output is byte-identical (after decompression) to the reference file."""
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 156
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Build a DownloadCommand from the parsed argparse namespace (wired via set_defaults)."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    """`transformers-cli download`: pre-fetch a model and its tokenizer into the local cache."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `download` subcommand and its options on the root CLI parser."""
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        # Stored privately; `run` forwards them to from_pretrained.
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """Download model weights/config and tokenizer files into the cache directory."""
        # Imported lazily so the CLI stays cheap to import.
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
| 118
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
_lowercase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        """Validate that provided data files are CSV or JSON."""
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads multiple-choice inputs.

    Flattens the per-example choice dimension, pads with the tokenizer, then
    reshapes back to (batch_size, num_choices, seq_len) and re-attaches labels.
    """

    tokenizer: PreTrainedTokenizerBase  # tokenizer whose `.pad` does the padding
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features) -> dict:
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # One dict per (example, choice) pair.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # Un-flatten back to (batch, num_choices, seq_len)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels (fix: original built the tensor with the invalid dtype
        # `torch.intaa` and never stored it in the batch).
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    """Fine-tune a multiple-choice model on SWAG (or user CSV/JSON files) with Trainer."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`.")
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat each context once per ending, pair with "<header> <ending_i>".
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """xla_spawn (TPU) entry point; `index` is the process ordinal (unused).

    Fix: the original redefined the training entry point's own name for this
    wrapper, shadowing it, and then called an undefined `main`.
    """
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 118
| 1
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class UpperCamelCase__(Pipeline):
    """
    Zero-shot image classification pipeline: scores an image against a set of
    free-text candidate labels using an image-text model (CLIP-style).

    Fixes vs. the original: the class now actually subclasses `Pipeline` (the
    base was an undefined name), the four hook methods carry the names the
    `Pipeline` base class dispatches to, and the sort key lambda binds the
    variable it uses.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images, **kwargs):
        """Classify `images` against `candidate_labels` (passed via kwargs)."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route user kwargs to the preprocess step; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        # Sort descending by score (fix: lambda previously referenced `x` while
        # its parameter had a different name).
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 436
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding: map each submodule to the public names it exports so
# the heavy frameworks (torch/tf/flax) are imported only on first attribute access.
# Fix: the original clobbered one throwaway variable per assignment, never defined
# the `_import_structure` it passed to `_LazyModule`, and discarded the lazy module
# instead of installing it in sys.modules.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 436
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class A( Protocol ):
    """Structural type for an audio filter: anything with a per-sample
    ``process`` method. The original base name ``lowerCamelCase__`` was
    undefined; ``Protocol`` (imported above) is the intended base.
    """
    def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> float:
        """Process one input sample; this default stub returns 0.0."""
        return 0.0
def A_ ( snake_case__ , snake_case__ ) -> tuple[int | float, int | float]:
_UpperCamelCase :Union[str, Any] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_UpperCamelCase :Optional[int] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def A_ ( filter_type , samplerate ) -> None:
    """Plot the frequency response (gain in dB) of ``filter_type``.

    Feeds a unit impulse through the filter, zero-pads to ``samplerate``
    samples, and plots 20*log10(|FFT|) on a log frequency axis.
    Fixes duplicate parameter names (SyntaxError), unbound locals, and the
    non-existent ``np.logaa``. NOTE(review): ``get_bounds`` is expected to be
    the helper defined above in this file — confirm the binding name.
    """
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )
    plt.plot(fft_db )
    plt.show()
def A_ ( filter_type , samplerate ) -> None:
    """Plot the phase response (radians) of ``filter_type``.

    Feeds a unit impulse through the filter, zero-pads to ``samplerate``
    samples, and plots the unwrapped FFT phase on a log frequency axis.
    Fixes duplicate parameter names (SyntaxError) and unbound locals.
    """
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(phases , -2 * pi ) )
    plt.show()
| 355
|
"""simple docstring"""
def A_ ( snake_case__ ) -> int:
    """Return the factorial of ``snake_case__`` (assumed a non-negative int).

    Fixes the original's unbound ``fact`` local (every assignment targeted a
    throwaway name).
    """
    fact = 1
    for i in range(1 , snake_case__ + 1 ):
        fact *= i
    return fact
def A_ ( snake_case__ ) -> int:
    """Return the sum of the decimal digits of ``snake_case__``.

    Fixes the original's unbound ``sum_of_digits``/``last_digit`` locals and
    the never-updated loop variable (which made the loop non-terminating).
    """
    number = snake_case__
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number //= 10  # drop the last digit
    return sum_of_digits
def A_ ( snake_case__ = 1_00 ) -> int:
    """Project Euler 20: sum of the digits of ``snake_case__``! (default 100!).

    NOTE(review): the original body called ``factorial`` and ``split_and_add``,
    names that are not bound in this module (the helpers were renamed). The
    computation is inlined here so the function is self-contained.
    """
    from math import factorial

    return sum(int(digit ) for digit in str(factorial(snake_case__ ) ) )
if __name__ == "__main__":
    # NOTE(review): the solver is bound to ``A_`` in this module; the original
    # call to ``solution`` would raise NameError.
    print(A_(int(input("""Enter the Number: """).strip())))
| 355
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def lowerCamelCase ( UpperCAmelCase_ : str )-> Dict:
    """Build an UperNet+ConvNeXt config for the given model name.

    Args:
        UpperCAmelCase_: checkpoint name such as ``upernet-convnext-tiny``;
            the substrings tiny/small/base/large/xlarge select the backbone.

    Returns:
        A configured ``UperNetConfig`` with an ADE20K (150-label) head.

    NOTE(review): the original body discarded every local into ``a`` and then
    passed the model name itself to ``hf_hub_download``/``ConvNextConfig``/
    ``UperNetConfig``; reconstructed here with the intended locals.
    """
    model_name = UpperCAmelCase_
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def lowerCamelCase ( UpperCAmelCase_ )-> list:
    """Build the (old_name, new_name) weight-rename pairs for converting an
    mmsegmentation UperNet-ConvNeXt checkpoint to the HF layout.

    Args:
        UpperCAmelCase_: the UperNet config; only ``backbone_config.depths``
            is read (to know how many stages/layers to enumerate).

    NOTE(review): fixes the original's unbound ``rename_keys`` local.
    """
    config = UpperCAmelCase_
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ] )
    # fmt: on
    return rename_keys
def lowerCamelCase ( dct , old_key , new_key )-> None:
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place.

    Fixes the original's three identically named parameters (duplicate
    argument names are a SyntaxError in Python) and its unbound locals.
    """
    val = dct.pop(old_key )
    dct[new_key] = val
def lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str )-> Any:
    """Convert an mmsegmentation UperNet-ConvNeXt checkpoint to the HF format,
    sanity-check a slice of the output logits, and optionally save and/or push
    the converted model and processor.

    NOTE(review): as written this definition cannot execute — all three
    parameters share the name ``UpperCAmelCase_`` (duplicate argument names are
    a SyntaxError), every local is bound to the throwaway name ``a`` while the
    body reads the intended names (model_name, state_dict, config, model,
    processor, outputs, ...), and the helpers it calls (get_upernet_config,
    create_rename_keys, rename_key) are not bound under those names in this
    module. Left byte-identical pending a coordinated repair.
    """
    a ={
        """upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
        """upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
        """upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
        """upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
        """upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
    }
    a =model_name_to_url[model_name]
    a =torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location="""cpu""" )["""state_dict"""]
    a =get_upernet_config(UpperCAmelCase_ )
    a =UperNetForSemanticSegmentation(UpperCAmelCase_ )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        a =state_dict.pop(UpperCAmelCase_ )
        if "bn" in key:
            a =key.replace("""bn""" , """batch_norm""" )
        a =val
    # rename keys
    a =create_rename_keys(UpperCAmelCase_ )
    for src, dest in rename_keys:
        rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    model.load_state_dict(UpperCAmelCase_ )
    # verify on image
    a ="""https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    a =Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert("""RGB""" )
    a =SegformerImageProcessor()
    a =processor(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        a =model(UpperCAmelCase_ )
    # Expected top-left 3x3 logits slice per released checkpoint.
    if model_name == "upernet-convnext-tiny":
        a =torch.tensor(
            [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
    elif model_name == "upernet-convnext-small":
        a =torch.tensor(
            [[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
    elif model_name == "upernet-convnext-base":
        a =torch.tensor(
            [[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
    elif model_name == "upernet-convnext-large":
        a =torch.tensor(
            [[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
    elif model_name == "upernet-convnext-xlarge":
        a =torch.tensor(
            [[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
    print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(UpperCAmelCase_ )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(UpperCAmelCase_ )
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(F'''openmmlab/{model_name}''' )
        processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    # Command-line entry point for the ConvNeXt-UperNet checkpoint conversion.
    # NOTE(review): the parser is bound to ``_lowerCamelCase`` but read as
    # ``parser``, the parsed namespace is read as ``args``, and the conversion
    # function is called as ``convert_upernet_checkpoint`` — none of these
    # names are bound in this module as written.
    _lowerCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''upernet-convnext-tiny''',
        type=str,
        choices=[f"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
        help='''Name of the ConvNext UperNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    _lowerCamelCase = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 321
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Regex matching any character that is not a letter, digit or underscore;
# used to tokenize file contents. The functions below read these names
# (NON_ALPHA, MIN_NUM_TOKENS, NUM_PERM), so the obfuscated bindings to
# ``_lowerCamelCase`` left them undefined.
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def lowerCamelCase ( UpperCAmelCase_ )-> Optional[MinHash]:
    """Compute a MinHash signature over a collection of tokens.

    Returns ``None`` when fewer than ``MIN_NUM_TOKENS`` tokens are supplied
    (too short to deduplicate meaningfully).

    NOTE(review): the original bound the hash object to a throwaway name and
    passed the token list itself as ``num_perm``; fixed to use ``NUM_PERM``.
    """
    if len(UpperCAmelCase_ ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(UpperCAmelCase_ ):
        min_hash.update(token.encode() )
    return min_hash
def lowerCamelCase ( UpperCAmelCase_ )-> Set[str]:
    """Split a string on non-identifier characters and return the set of
    tokens that are not blank (tokens are kept verbatim, not stripped)."""
    tokens = set()
    for piece in NON_ALPHA.split(UpperCAmelCase_ ):
        if piece.strip():
            tokens.add(piece )
    return tokens
class UpperCAmelCase__ :
    '''MinHash-LSH index grouping near-duplicate documents into clusters.

    NOTE(review): the three methods below all carry the obfuscated name
    ``lowerCAmelCase__`` (upstream: add / get_duplicate_clusters /
    save_duplicate_clusters), so only the last definition survives on the
    class. ``__init__`` also drops its inputs into the throwaway local ``a``
    while later code reads ``self._duplication_jaccard_threshold`` etc., and
    the second method's two parameters share one name (a SyntaxError).
    Left byte-identical pending a coordinated repair.
    '''
    def __init__( self , *,
    _lowerCAmelCase = 0.85 , ):
        a =duplication_jaccard_threshold
        a =NUM_PERM
        a =MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        a =defaultdict(_lowerCAmelCase )
    # Upstream: add(code_key, min_hash) — insert a document and attach it to an
    # existing cluster when the LSH query finds close duplicates.
    def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase ):
        a =self._index.query(_lowerCAmelCase )
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''' )
            return
        self._index.insert(_lowerCAmelCase , _lowerCAmelCase )
        if len(_lowerCAmelCase ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase )
    # Upstream: get_duplicate_clusters() — materialize clusters as lists of
    # {base_index, repo_name, path} dicts.
    def lowerCAmelCase__ ( self ):
        a =[]
        for base, duplicates in self._duplicate_clusters.items():
            a =[base] + list(_lowerCAmelCase )
            # reformat the cluster to be a list of dict
            a =[{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(_lowerCAmelCase )
        return duplicate_clusters
    # Upstream: save(filepath) — dump the clusters as JSON.
    def lowerCAmelCase__ ( self , _lowerCAmelCase ):
        a =self.get_duplicate_clusters()
        with open(_lowerCAmelCase , """w""" ) as f:
            json.dump(_lowerCAmelCase , _lowerCAmelCase )
def lowerCamelCase ( UpperCAmelCase_ : Optional[int] )-> str:
    """Map one (index, row) pair to ((index, repo_name, path), MinHash), or
    None when the row's content has too few tokens.

    NOTE(review): ``a , a =element`` unpacks into a single throwaway name, and
    ``data``/``index``/``min_hash``/``get_min_hash``/``element`` are unbound
    here as written. Left byte-identical pending a coordinated repair.
    """
    a , a =element
    a =get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def lowerCamelCase ( UpperCAmelCase_ : Type[Dataset] )-> Any:
    """Yield ((index, repo, path), MinHash) pairs over a dataset iterator,
    computing hashes in a multiprocessing pool fed by a threaded prefetcher.

    NOTE(review): references ``_compute_min_hash``, which is not bound under
    that name in this module (the helper above was renamed).
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(UpperCAmelCase_ , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def lowerCamelCase ( UpperCAmelCase_ : Type[Dataset] , UpperCAmelCase_ : float )-> Union[str, Any]:
    """Build a duplication index over the whole dataset and return its clusters.

    NOTE(review): the two parameters share one name (duplicate argument names
    are a SyntaxError), and ``DuplicationIndex`` (the class above is bound as
    ``UpperCAmelCase__``), ``minhash_iter`` and ``di`` are unbound here as
    written. Left byte-identical pending a coordinated repair.
    """
    a =DuplicationIndex(duplication_jaccard_threshold=UpperCAmelCase_ )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(UpperCAmelCase_ ) ) , max_queue_size=100 ) ):
        di.add(UpperCAmelCase_ , UpperCAmelCase_ )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def lowerCamelCase ( code_a , code_b )-> float:
    """Jaccard similarity between the token sets of two code strings.

    Fixes the original duplicate ``UpperCAmelCase_`` parameter names (a
    SyntaxError) and the unbound ``tokensa`` locals.

    NOTE(review): ``get_tokens`` is the tokenizer defined above in this file,
    which is not bound under that name after obfuscation — confirm binding.
    """
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
# Module-level dataset handle meant to be shared with multiprocessing workers.
# NOTE(review): the extremes-finding function below declares
# ``global _shared_dataset``, which does not match this obfuscated name —
# TODO confirm the intended global before running.
_lowerCamelCase = None
def lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] )-> List[str]:
    """Greedily pick cluster "extremes": keep an element only when it is not
    within ``jaccard_threshold`` similarity of an already-kept extreme, and
    count merged copies on the kept element.

    NOTE(review): the two parameters share one name (a SyntaxError);
    ``cluster``/``extremes``/``elementa``/``_shared_dataset``/
    ``jaccard_similarity``/``jaccard_threshold`` are all unbound here as
    written (locals were dropped into ``a``). Left byte-identical pending a
    coordinated repair.
    """
    a =[]
    for elementa in cluster:
        a =_shared_dataset[elementa["""base_index"""]]["""content"""]
        for elementa in extremes:
            a =_shared_dataset[elementa["""base_index"""]]["""content"""]
            if jaccard_similarity(UpperCAmelCase_ , UpperCAmelCase_ ) >= jaccard_threshold:
                elementa["copies"] += 1
                break
        else:
            a =1
            extremes.append(UpperCAmelCase_ )
    return extremes
def lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] )-> int:
    """Find the extremes of every duplicate cluster in parallel, sharing the
    dataset with workers through a module-level global.

    NOTE(review): the three parameters share one name (a SyntaxError);
    ``dataset``/``extremes_list``/``_find_cluster_extremes_shared`` are
    unbound here, and ``global _shared_dataset`` never receives the dataset
    because the assignment targets ``a``. Left byte-identical pending a
    coordinated repair.
    """
    global _shared_dataset
    a =dataset
    a =[]
    a =partial(_find_cluster_extremes_shared , jaccard_threshold=UpperCAmelCase_ )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                UpperCAmelCase_ , UpperCAmelCase_ , ) , total=len(UpperCAmelCase_ ) , ):
            extremes_list.append(UpperCAmelCase_ )
    return extremes_list
def lowerCamelCase ( UpperCAmelCase_ : Type[Dataset] , UpperCAmelCase_ : float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicates from a dataset, keeping one "extreme" element per
    duplicate cluster, and return the filtered dataset plus the annotated
    clusters.

    NOTE(review): the parameters share one name (a SyntaxError), and nearly
    every name the body reads (duplicate_clusters, dataset, extreme_dict,
    remove_indices, ds_filter, make_duplicate_clusters, find_extremes, ...) is
    unbound because each assignment targets the throwaway local ``a``. Left
    byte-identical pending a coordinated repair.
    """
    a =make_duplicate_clusters(UpperCAmelCase_ , UpperCAmelCase_ )
    a ={x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    a ={}
    a =find_extremes(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    for extremes in extremes_clusters:
        for element in extremes:
            a =element
    a =duplicate_indices - set(extreme_dict.keys() )
    a =dataset.filter(lambda UpperCAmelCase_ , UpperCAmelCase_ : idx not in remove_indices , with_indices=UpperCAmelCase_ )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            a =element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                a =extreme_dict[element["""base_index"""]]["""copies"""]
    print(F'''Original dataset size: {len(UpperCAmelCase_ )}''' )
    print(F'''Number of duplicate clusters: {len(UpperCAmelCase_ )}''' )
    print(F'''Files in duplicate cluster: {len(UpperCAmelCase_ )}''' )
    print(F'''Unique files in duplicate cluster: {len(UpperCAmelCase_ )}''' )
    print(F'''Filtered dataset size: {len(UpperCAmelCase_ )}''' )
    return ds_filter, duplicate_clusters
| 321
| 1
|
'''simple docstring'''
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(__UpperCAmelCase , x % y )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
return (x * y) // greatest_common_divisor(__UpperCAmelCase , __UpperCAmelCase )
def __snake_case (__UpperCAmelCase = 20 ):
"""simple docstring"""
lowerCamelCase_ : Dict = 1
for i in range(1 , n + 1 ):
lowerCamelCase_ : List[Any] = lcm(__UpperCAmelCase , __UpperCAmelCase )
return g
if __name__ == "__main__":
    # NOTE(review): the solver is bound to ``__snake_case`` in this module;
    # the original f-string called the undefined name ``solution``.
    print(f"""{__snake_case() = }""")
| 501
|
'''simple docstring'''
import torch
def __snake_case ():
"""simple docstring"""
if torch.cuda.is_available():
lowerCamelCase_ : Optional[int] = torch.cuda.device_count()
else:
lowerCamelCase_ : str = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
    # NOTE(review): the entry point is bound to ``__snake_case`` in this
    # module; the original call target ``main`` is undefined.
    __snake_case()
| 501
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
snake_case = get_tests_dir("""fixtures""")
class lowerCAmelCase ( unittest.TestCase ):
    """Offline-robustness tests for ``WavaVecaFeatureExtractor.from_pretrained``.

    NOTE(review): both test methods share the obfuscated name ``_A`` (the
    second shadows the first), the first test rebinds every mock field to the
    throwaway local ``lowerCAmelCase__`` instead of configuring the mock
    (status_code/headers/raise_for_status/json are never set), and it passes
    the unbound name ``a__`` as ``return_value``. Left byte-identical pending
    a coordinated repair.
    """
    def _A ( self : int ):
        '''A 500 on the head request should fall back to the local cache.'''
        lowerCAmelCase__ : str = mock.Mock()
        lowerCAmelCase__ : Any = 500
        lowerCAmelCase__ : Dict = {}
        lowerCAmelCase__ : str = HTTPError
        lowerCAmelCase__ : List[str] = {}
        # Download this model to make sure it's in the cache.
        lowerCAmelCase__ : int = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=a__ ) as mock_head:
            lowerCAmelCase__ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def _A ( self : Dict ):
        '''Loading from a fully resolved config URL should succeed.'''
        lowerCAmelCase__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class lowerCAmelCase ( unittest.TestCase ):
    """Staging-hub round-trip tests: push feature extractors with
    ``push_to_hub``/``save_pretrained`` and reload them with
    ``from_pretrained``.

    NOTE(review): every method here shares the obfuscated name ``_A`` (only
    the last definition survives on the class), ``cls._token`` is never set
    (the token is dropped into the throwaway local ``lowerCAmelCase__``), and
    many calls pass the unbound name ``a__`` where concrete arguments belong.
    Left byte-identical pending a coordinated repair.
    """
    @classmethod
    def _A ( cls : Optional[int] ):
        '''Log in to the staging hub once for the whole class (upstream setUpClass).'''
        lowerCAmelCase__ : str = TOKEN
        HfFolder.save_token(a__ )
    @classmethod
    def _A ( cls : int ):
        '''Best-effort cleanup of repos created by the tests (upstream tearDownClass).'''
        try:
            delete_repo(token=cls._token , repo_id="test-feature-extractor" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
        except HTTPError:
            pass
    def _A ( self : Optional[Any] ):
        '''Round-trip push_to_hub / from_pretrained under the user namespace.'''
        lowerCAmelCase__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(a__ )
        feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
        lowerCAmelCase__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(a__ , getattr(a__ , a__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                a__ , repo_id="test-feature-extractor" , push_to_hub=a__ , use_auth_token=self._token )
        lowerCAmelCase__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(a__ , getattr(a__ , a__ ) )
    def _A ( self : Optional[int] ):
        '''Round-trip push_to_hub / from_pretrained under an organization namespace.'''
        lowerCAmelCase__ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(a__ )
        feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
        lowerCAmelCase__ : int = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(a__ , getattr(a__ , a__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                a__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=a__ , use_auth_token=self._token )
        lowerCAmelCase__ : Any = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(a__ , getattr(a__ , a__ ) )
    def _A ( self : Dict ):
        '''Dynamic (trust_remote_code) feature extractors keep their auto_map and class name.'''
        CustomFeatureExtractor.register_for_auto_class()
        lowerCAmelCase__ : List[str] = CustomFeatureExtractor.from_pretrained(a__ )
        feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
        lowerCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(
            F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=a__ )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import map: submodule name -> public names it provides. The original
# obfuscated code bound this dict (and each backend list) to the throwaway
# name ``snake_case``, so `_import_structure` below was undefined at import.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 568
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level values are bound to the same throwaway name
# ``a`` — the logger is clobbered immediately by the checkpoint map (upstream
# names: ``logger`` and ``VIT_PRETRAINED_CONFIG_ARCHIVE_MAP``).
a = logging.get_logger(__name__)
# Map of released ViT checkpoints to their hosted config files.
a = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase_ ( PretrainedConfig ):
    '''Configuration for a ViT model.

    Fixes the original definition, which could not execute: the base name
    ``__lowerCAmelCase`` was undefined (``PretrainedConfig`` imported above is
    the intended base), every ``__init__`` parameter shared the name
    ``_UpperCAmelCase`` (duplicate argument names are a SyntaxError), and no
    attribute was ever set on ``self``.
    '''

    # NOTE(review): attribute name kept as-is; upstream calls this ``model_type``.
    UpperCAmelCase : str = '''vit'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class lowercase_ ( OnnxConfig ):
    '''ONNX export configuration for ViT.

    Fixes the undefined base name ``__lowerCAmelCase`` (``OnnxConfig`` imported
    above is the intended base) and drops the class-attribute annotation that
    referenced the unimported name ``Dict``.

    NOTE(review): both properties below share the obfuscated name
    ``lowerCAmelCase_`` — the second definition shadows the first at class
    creation. Upstream names are ``inputs`` and ``atol_for_validation``.
    '''

    # Minimum torch version required for export (upstream:
    # ``torch_onnx_minimum_version``).
    UpperCAmelCase = version.parse('''1.11''' )

    @property
    def lowerCAmelCase_ ( self : int ):
        """Input spec: pixel_values with dynamic batch/channels/height/width axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def lowerCAmelCase_ ( self : int ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4
| 7
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class snake_case__ ( CLIPImageProcessor ):
    """Deprecated alias of ``CLIPImageProcessor`` kept for backward
    compatibility.

    Fixes the original definition: the base name ``__A`` was undefined
    (``CLIPImageProcessor`` imported above is the intended base), ``*args``
    and ``**kwargs`` shared one name (a SyntaxError), and the args tuple was
    passed to ``warnings.warn`` where the warning category belongs.
    """

    def __init__( self , *args , **kwargs ) -> None:
        """Emit the deprecation warning, then defer to CLIPImageProcessor."""
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 419
| 0
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ : List[Any] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ : str = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ : str = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def _lowerCAmelCase ( _a : Any ) -> str:
def remove_articles(_a : Union[str, Any] ):
lowerCAmelCase_ : Tuple = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(_a , """ """ , _a )
def white_space_fix(_a : Tuple ):
return " ".join(text.split() )
def remove_punc(_a : Union[str, Any] ):
lowerCAmelCase_ : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_a : Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_a ) ) ) )
def _lowerCAmelCase ( prediction , reference )-> int:
    """Return 1 when the two strings match exactly after normalization, else 0.

    Fixes the original duplicate ``_a`` parameter names (a SyntaxError).
    NOTE(review): the normalizer is not bound as ``normalize_answer`` in this
    module (helpers were renamed) — confirm the intended target before use.
    """
    return int(normalize_answer(prediction ) == normalize_answer(reference ) )
def _lowerCAmelCase ( predictions , references )-> float:
    """Percentage (0-100) of predictions that exactly match any of their
    references after normalization.

    Fixes the original duplicate ``_a`` parameter names (a SyntaxError) and
    the ambiguous ``compute_exact(_a, _a)`` call arguments.
    NOTE(review): the exact-match helper is not bound as ``compute_exact`` in
    this module (helpers were renamed) — confirm the intended target.
    """
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 1_00
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute the keep / delete / add sub-scores of SARI for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference sentence.
        numref: number of references (source/candidate counts are scaled by it
            so they are comparable with the pooled reference counts).
    Returns:
        Tuple ``(keepscore, delscore_precision, addscore)``.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP: n-grams present in both source and candidate, credited when they
    # also appear in the references.
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION: n-grams the candidate removed from the source, credited when
    # the references removed them too.
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION: n-grams the candidate introduced, credited when the references
    # introduced them as well.
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """Sentence-level SARI: average keep/delete/add F-scores over 1..4-grams.

    Args:
        ssent: source sentence (space-tokenized string).
        csent: candidate (predicted) sentence.
        rsents: list of reference sentences.
    Returns:
        SARI score in [0, 1].
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    s3grams = []
    s4grams = []
    c2grams = []
    c3grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    # Build 1..4-gram lists for every reference, kept in separate lists per
    # n-gram order so each order is scored independently.
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    # Source 2..4-grams.
    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    # Candidate 2..4-grams.
    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    """Tokenize (and optionally lowercase) a sentence the way sacreBLEU does.

    Args:
        sentence: input sentence.
        lowercase: whether to lowercase before tokenizing.
        tokenizer: one of "13a", "intl", "moses", "penn"; anything else leaves
            the sentence untokenized.
        return_str: if False, return a list of tokens instead of a string.
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizer registry; support both APIs.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    """Corpus-level SARI, averaged over sentences and scaled to [0, 100].

    Args:
        sources: list of source sentences.
        predictions: list of predicted sentences.
        references: list of lists of reference sentences.
    Raises:
        ValueError: if the three lists have different lengths.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("""Sources length must match predictions and references lengths.""")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; all references lists must have equal length.

    Args:
        predictions: list of predicted sentences.
        references: list of lists of references (same count per prediction).
    Returns:
        The sacrebleu corpus score (float).
    Raises:
        ValueError: if predictions have differing numbers of references.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
    # sacrebleu expects the transpose: one list per reference "stream".
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase__(datasets.Metric):
    """WIKI_SPLIT metric: combines SARI, sacreBLEU and exact-match scores."""

    def _info(self):
        # Declares the input schema consumed by `compute`. `sources` is passed
        # to `_compute` by the caller but is not part of the declared features,
        # matching the referenced upstream implementations.
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Sequence(datasets.Value("""string""", id="""sequence"""), id="""references"""),
                }), codebase_urls=[
                """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
                """https://github.com/cocoxu/simplification/blob/master/SARI.py""",
                """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
                """https://github.com/mjpost/sacreBLEU""",
            ], reference_urls=[
                """https://www.aclweb.org/anthology/Q16-1029.pdf""",
                """https://github.com/mjpost/sacreBLEU""",
                """https://en.wikipedia.org/wiki/BLEU""",
                """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
            ], )

    def _compute(self, sources, predictions, references):
        """Return {"sari": ..., "sacrebleu": ..., "exact": ...} for the batch."""
        result = {}
        result.update({"""sari""": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"""sacrebleu""": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"""exact""": compute_em(predictions=predictions, references=references)})
        return result
| 718
|
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
UpperCAmelCase_ : int = get_logger(__name__)
class lowercase__ ( enum.Enum ):
    """Verification mode for downloaded/processed data.

    NOTE(review): the three member names were mangled to the same identifier,
    so only the last assignment survives on the class. In upstream ``datasets``
    these members are ALL_CHECKS / BASIC_CHECKS / NO_CHECKS -- confirm before
    relying on attribute access.
    """

    __UpperCamelCase = """all_checks"""
    __UpperCamelCase = """basic_checks"""
    __UpperCamelCase = """no_checks"""
class ChecksumVerificationException(Exception):
    """Base error for all checksum verification failures."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not listed in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files listed in the expected checksums were never downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum differs from the expected one."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    Args:
        expected_checksums: mapping url -> expected checksum, or None to skip.
        recorded_checksums: mapping url -> checksum actually recorded.
        verification_name: optional label used in the error message.
    Raises:
        ExpectedMoreDownloadedFiles: expected urls missing from the record.
        UnexpectedDownloadedFile: recorded urls absent from the expectation.
        NonMatchingChecksumError: any url whose checksums disagree.
    """
    if expected_checksums is None:
        logger.info("""Unable to verify checksums.""")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = """ for """ + verification_name if verification_name is not None else """"""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f'Checksums didn\'t match{for_verification_name}:\n'
            f'{bad_urls}\n'
            """Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""")
    logger.info("""All the checksums matched successfully""" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error for all split-size verification failures."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not listed in the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were never recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A recorded split has a different number of examples than expected."""
def verify_splits(expected_splits, recorded_splits):
    """Compare recorded split sizes against the expected ones.

    Args:
        expected_splits: mapping name -> split info (with .num_examples), or
            None to skip verification.
        recorded_splits: mapping name -> recorded split info.
    Raises:
        ExpectedMoreSplits: expected split names missing from the record.
        UnexpectedSplits: recorded split names absent from the expectation.
        NonMatchingSplitsSizesError: any split whose example counts disagree.
    """
    if expected_splits is None:
        logger.info("""Unable to verify splits sizes.""")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("""All the splits matched successfully.""")
def get_size_checksum_dict(path, record_checksum=True):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for the file at ``path``.

    Args:
        path: filesystem path of the file to measure.
        record_checksum: if False, skip hashing and report ``checksum=None``.
    """
    if record_checksum:
        # Local import: the top-of-file `from hashlib import shaaaa` is a
        # mangled (nonexistent) name; sha256 is the real hashlib digest.
        from hashlib import sha256

        m = sha256()
        with open(path, "rb") as f:
            # Stream in 1 MiB chunks so large files are never fully in memory.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """True iff ``dataset_size`` is a truthy size below ``config.IN_MEMORY_MAX_SIZE``.

    Returns False when either the size or the configured limit is falsy
    (None/0), i.e. when the comparison cannot be made.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 440
| 0
|
'''simple docstring'''
from __future__ import annotations
# Sentinel key that marks the end of an inserted word inside the trie.
END = "#"


class Trie:
    """Character-level prefix tree supporting insertion and prefix lookup."""

    def __init__(self):
        self._trie = {}

    def insert_word(self, text):
        """Insert ``text`` into the trie, one nested dict level per character."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix):
        """Return all stored suffixes below ``prefix`` (empty list if absent)."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d):
        """Recursively collect suffix strings; END turns into a trailing space."""
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string):
    """Return every known completion of ``string`` (each with a trailing space)."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main():
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 138
|
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word of more than four letters; keep shorter words as-is.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    # Bug fix: the length test must apply to each word, not the whole sentence.
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 1
|
def apply_table(inp, table):
    """Permute/select characters of ``inp`` per the 1-indexed ``table``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up the 4-bit ``data`` in S-box ``s``: outer bits select the row,
    middle bits the column; returns the entry as a binary string."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES on the 8-bit ``message``.

    NOTE(review): relies on the module-global ``p4_table`` being assigned by
    the script block below before this is called -- confirm if reused elsewhere.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741  pad S-box output to 2 bits
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("""Enter 10 bit key: """)
    message = input("""Enter 8 bit message: """)

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("""Cipher text is:""", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("""Plain text after decypting is:""", PT)
| 701
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowercase :
    """Builds configs and random inputs for DecisionTransformerModel tests.

    The attributes set in ``__init__`` (batch_size, seq_length, act_dim,
    state_dim, hidden_size, max_length, is_training) are read by the test
    class below via ``self.model_tester``.
    """

    def __init__( self : Union[str, Any] , snake_case : Dict , snake_case : Optional[Any]=13 , snake_case : List[str]=7 , snake_case : Optional[Any]=6 , snake_case : Optional[Any]=17 , snake_case : Dict=23 , snake_case : List[Any]=11 , snake_case : Optional[int]=True , ):
        """Store the test harness (`parent`) and the model/test hyperparameters."""
        SCREAMING_SNAKE_CASE : str = parent
        SCREAMING_SNAKE_CASE : Dict = batch_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
        SCREAMING_SNAKE_CASE : Tuple = act_dim
        SCREAMING_SNAKE_CASE : Optional[Any] = state_dim
        SCREAMING_SNAKE_CASE : int = hidden_size
        SCREAMING_SNAKE_CASE : int = max_length
        SCREAMING_SNAKE_CASE : int = is_training

    def lowerCamelCase_ ( self : int ):
        """Create random states/actions/rewards/returns/timesteps/mask tensors
        plus a config, shaped (batch_size, seq_length, feature_dim)."""
        SCREAMING_SNAKE_CASE : Any = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        SCREAMING_SNAKE_CASE : Dict = floats_tensor((self.batch_size, self.seq_length, 1) )
        SCREAMING_SNAKE_CASE : int = floats_tensor((self.batch_size, self.seq_length, 1) )
        SCREAMING_SNAKE_CASE : Tuple = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        SCREAMING_SNAKE_CASE : Tuple = random_attention_mask((self.batch_size, self.seq_length) )
        SCREAMING_SNAKE_CASE : Tuple = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def lowerCamelCase_ ( self : str ):
        """Build a DecisionTransformerConfig from the stored hyperparameters."""
        return DecisionTransformerConfig(
            batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )

    def lowerCamelCase_ ( self : Optional[Any] , snake_case : Optional[Any] , snake_case : int , snake_case : List[str] , snake_case : Any , snake_case : List[Any] , snake_case : int , snake_case : Any , ):
        """Run a forward pass in eval mode and assert the output shapes."""
        SCREAMING_SNAKE_CASE : Dict = DecisionTransformerModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )

        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length *3 as there are 3 modelities: states, returns and actions

    def lowerCamelCase_ ( self : Any ):
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) ,
        ) : Union[str, Any] = config_and_inputs
        SCREAMING_SNAKE_CASE : str = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase):
    """Common-test suite for DecisionTransformerModel.

    NOTE(review): the class attribute names below were mangled to the same
    identifier, so only the last assignment survives at runtime. In the
    upstream test these are distinct framework flags (all_model_classes,
    pipeline_model_mapping, test_pruning, etc.) -- confirm against the
    original transformers test file before relying on them.
    """
    UpperCAmelCase : str = (DecisionTransformerModel,) if is_torch_available() else ()
    UpperCAmelCase : Optional[int] = ()
    UpperCAmelCase : Tuple = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    UpperCAmelCase : int = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    UpperCAmelCase : List[Any] = False
    UpperCAmelCase : Tuple = False
    UpperCAmelCase : List[Any] = False
    UpperCAmelCase : Tuple = False
    UpperCAmelCase : Optional[int] = False
    UpperCAmelCase : Any = False
    UpperCAmelCase : Union[str, Any] = False
    UpperCAmelCase : Dict = False
    UpperCAmelCase : Optional[int] = False

    def lowerCamelCase_ ( self : Union[str, Any] ):
        # Wire up the shared model tester and a config tester for common checks.
        SCREAMING_SNAKE_CASE : Optional[int] = DecisionTransformerModelTester(self )
        SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=snake_case , hidden_size=37 )

    def lowerCamelCase_ ( self : Tuple ):
        """Run the standard config serialization/validation tests."""
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self : Tuple ):
        """Forward-pass shape test via the model tester."""
        SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    @slow
    def lowerCamelCase_ ( self : str ):
        """Check that the published pretrained checkpoint loads."""
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : Tuple = DecisionTransformerModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )

    def lowerCamelCase_ ( self : List[str] ):
        """Verify forward() exposes the expected argument names, in order."""
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Optional[int] = model_class(snake_case )
            SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]

            SCREAMING_SNAKE_CASE : Tuple = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(snake_case )] , snake_case )
@require_torch
class lowercase ( unittest.TestCase):
    """Slow integration test: autoregressive rollout against a pretrained
    hopper-expert checkpoint, comparing predicted actions to fixed values."""

    @slow
    def lowerCamelCase_ ( self : Optional[Any] ):
        """Roll the model forward for two steps with a fake environment and
        assert each predicted action matches the stored expected output."""
        SCREAMING_SNAKE_CASE : Optional[Any] = 2  # number of steps of autoregressive prediction we will perform
        SCREAMING_SNAKE_CASE : str = 10  # defined by the RL environment, may be normalized
        SCREAMING_SNAKE_CASE : List[str] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
        SCREAMING_SNAKE_CASE : Dict = model.to(snake_case )
        SCREAMING_SNAKE_CASE : Optional[Any] = model.config
        torch.manual_seed(0 )  # fixed seed so the random "environment" states are reproducible
        SCREAMING_SNAKE_CASE : int = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case , dtype=torch.floataa )  # env.reset()
        SCREAMING_SNAKE_CASE : str = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=snake_case )
        SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(snake_case , device=snake_case , dtype=torch.floataa ).reshape(1 , 1 , 1 )
        SCREAMING_SNAKE_CASE : Any = state
        # Empty action/reward histories; timestep 0.
        SCREAMING_SNAKE_CASE : str = torch.zeros(1 , 0 , config.act_dim , device=snake_case , dtype=torch.floataa )
        SCREAMING_SNAKE_CASE : int = torch.zeros(1 , 0 , device=snake_case , dtype=torch.floataa )
        SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(0 , device=snake_case , dtype=torch.long ).reshape(1 , 1 )

        for step in range(snake_case ):
            # Append a zero placeholder for the action/reward being predicted.
            SCREAMING_SNAKE_CASE : Any = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case )] , dim=1 )
            SCREAMING_SNAKE_CASE : str = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case )] , dim=1 )
            SCREAMING_SNAKE_CASE : Any = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )

            with torch.no_grad():
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = model(
                    states=snake_case , actions=snake_case , rewards=snake_case , returns_to_go=snake_case , timesteps=snake_case , attention_mask=snake_case , return_dict=snake_case , )

            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            # Fake environment step: random next state, constant reward.
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=snake_case , dtype=torch.floataa ),
                1.0,
                False,
                {},
            )

            SCREAMING_SNAKE_CASE : List[Any] = action_pred[0, -1]
            SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([states, state] , dim=1 )
            # Returns-to-go decrease by the observed reward each step.
            SCREAMING_SNAKE_CASE : Optional[Any] = returns_to_go[0, -1] - reward
            SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(
                [timesteps, torch.ones((1, 1) , device=snake_case , dtype=torch.long ) * (step + 1)] , dim=1 )
| 308
| 0
|
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y).

    Args:
        f: right-hand side, callable f(x, y) -> float.
        y0: initial value y(x0).
        x0: start of the integration interval.
        h: step size.
        x_end: end of the integration interval.
    Returns:
        numpy array of length n+1 with the solution at x0, x0+h, ..., where
        n = ceil((x_end - x0) / h).
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Four slope evaluations of the RK4 scheme.
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        # Weighted average: midpoint slopes count double.
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
UpperCamelCase_ : List[Any] = 3
def primitive_root(p: int) -> int:
    """Randomly search for a primitive-root candidate modulo ``p``.

    Rejects g with g^2 = 1 (mod p) or g^p = 1 (mod p), mirroring the
    original implementation's heuristic filter.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p)
        if pow(g, 2, p) == 1:
            continue
        if pow(g, p, p) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal (public_key, private_key) pair of ``key_size`` bits."""
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_a2 = cryptomath.find_mod_inverse(pow(e_a, d, p), p)

    public_key = (key_size, e_a, e_a2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if they exist."""
    if os.path.exists(f"""{name}_pubkey.txt""") or os.path.exists(f"""{name}_privkey.txt"""):
        print("\nWARNING:")
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(f"""{name}_pubkey.txt""", "w") as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(f"""Writing private key to file {name}_privkey.txt...""")
    with open(f"""{name}_privkey.txt""", "w") as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 461
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase_ : List[Any] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : Dict = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase : Dict = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase : Optional[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase : Optional[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    @require_torch
    def UpperCamelCase_ ( self ):
        """Smoke-test the PyTorch text-classification pipeline on a tiny random
        model: single/batched inputs, top_k, and the legacy return_all_scores
        flag, pinning the exact label/score dicts returned."""
        _SCREAMING_SNAKE_CASE =pipeline(
            task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )

        _SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' )
        self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )

        _SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' , top_k=2 )
        self.assertEqual(
            nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )

        _SCREAMING_SNAKE_CASE =text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
        self.assertEqual(
            nested_simplify(_A ) , [
                [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
                [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
            ] , )

        _SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' , top_k=1 )
        self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )

        # Legacy behavior
        _SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' , return_all_scores=_A )
        self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )

        _SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' , return_all_scores=_A )
        self.assertEqual(
            nested_simplify(_A ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )

        _SCREAMING_SNAKE_CASE =text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=_A )
        self.assertEqual(
            nested_simplify(_A ) , [
                [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
                [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
            ] , )

        _SCREAMING_SNAKE_CASE =text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=_A )
        self.assertEqual(
            nested_simplify(_A ) , [
                {'''label''': '''LABEL_0''', '''score''': 0.504},
                {'''label''': '''LABEL_0''', '''score''': 0.504},
            ] , )
    @require_torch
    def UpperCamelCase_ ( self ):
        """Check the pipeline accepts an explicit torch.device("cpu")."""
        import torch

        _SCREAMING_SNAKE_CASE =pipeline(
            task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )

        _SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' )
        self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
_SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline('''text-classification''' )
_SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_A ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
_SCREAMING_SNAKE_CASE =text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(_A ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
_SCREAMING_SNAKE_CASE =text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(_A ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
@slow
@require_tf
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline('''text-classification''' , framework='''tf''' )
_SCREAMING_SNAKE_CASE =text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_A ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
_SCREAMING_SNAKE_CASE =text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(_A ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
_SCREAMING_SNAKE_CASE =text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(_A ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
def UpperCamelCase_ ( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TextClassificationPipeline(model=_A , tokenizer=_A )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCamelCase_ ( self , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_SCREAMING_SNAKE_CASE ="HuggingFace is in"
_SCREAMING_SNAKE_CASE =text_classifier(_A )
self.assertEqual(nested_simplify(_A ) , [{'''label''': ANY(_A ), '''score''': ANY(_A )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
_SCREAMING_SNAKE_CASE =["HuggingFace is in ", "Paris is in France"]
_SCREAMING_SNAKE_CASE =text_classifier(_A )
self.assertEqual(
nested_simplify(_A ) , [{'''label''': ANY(_A ), '''score''': ANY(_A )}, {'''label''': ANY(_A ), '''score''': ANY(_A )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_SCREAMING_SNAKE_CASE =text_classifier(_A , top_k=_A )
_SCREAMING_SNAKE_CASE =len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_A ) , [[{'''label''': ANY(_A ), '''score''': ANY(_A )}] * N, [{'''label''': ANY(_A ), '''score''': ANY(_A )}] * N] , )
_SCREAMING_SNAKE_CASE ={"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
_SCREAMING_SNAKE_CASE =text_classifier(_A )
self.assertEqual(
nested_simplify(_A ) , {'''label''': ANY(_A ), '''score''': ANY(_A )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_SCREAMING_SNAKE_CASE =[["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(_A ):
text_classifier(_A )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_SCREAMING_SNAKE_CASE =text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(_A ) , [{'''label''': ANY(_A ), '''score''': ANY(_A )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
| 715
|
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger shared by the config classes below.
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
# Released OwlViT checkpoint name -> URL of its hosted config.json.
UpperCAmelCase_ : Optional[Any] = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OwlViT text encoder.

    Defaults reproduce the text tower of ``google/owlvit-base-patch32``.
    """

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config; accepts either a text config or a composite `owlvit` config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OwlViT vision encoder.

    Defaults reproduce the vision tower of ``google/owlvit-base-patch32``.
    """

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config; accepts either a vision config or a composite `owlvit` config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    """Composite configuration holding an OwlViT text config and vision config."""

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''')

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from separate text/vision config dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OwlViT (text + image inputs)."""

    @property
    def inputs(self) -> "Mapping[str, Mapping[int, str]]":
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> "Mapping[str, Mapping[int, str]]":
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported model against PyTorch.
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor,
        batch_size: int = -1,
        seq_length: int = -1,
        framework=None,
    ):
        """Merge dummy text inputs (tokenizer) and dummy image inputs (image processor)."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 165
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Preprocess images and/or audio; at least one of `images`/`audio` is required."""
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        # Merge whichever modality outputs were produced into one dict.
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """Union of the two sub-processors' input names, de-duplicated, order preserved."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 3
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Markov chain over named nodes with explicit transition probabilities."""

    def __init__(self):
        # node -> {destination node -> transition probability}
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, nodea: str, nodea_to: str, probability: float):
        """Record P(nodea -> nodea_to); unseen nodes are added automatically."""
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodea_to not in self.connections:
            self.add_node(nodea_to)
        self.connections[nodea][nodea_to] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from `node`'s outgoing distribution ("" if none hit)."""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


# Backward-compatible alias for the original (obfuscated) class name.
a = MarkovChainGraphUndirectedUnweighted
def SCREAMING_SNAKE_CASE(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """Run a random walk of `steps` transitions from `start`; count node visits.

    `transitions` is a list of (source, destination, probability) triples.
    Returns a Counter mapping node name -> number of visits.
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodea_to, probability in transitions:
        graph.add_transition_probability(nodea, nodea_to, probability)
    # Counter over node names initialises every node's count to 1.
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 144
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """Return this process's slice of 1..num_processes**2 as a 1-D float tensor.

    Process `p` (of `n`) gets [n*p + 1, ..., n*p + n], placed on `state.device`.
    """
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    """Gathering each process's slice must reconstruct 1..num_processes**2 in order."""
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    """Gathering a one-element list per process yields [0, 1, ..., num_processes-1]."""
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
def test_broadcast(state):
    """After broadcast, every process holds the main process's tensor [1..num_processes]."""
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    """Non-main processes' shorter tensors get zero-padded up to the longest length."""
    # Main process deliberately creates one extra element so padding is exercised.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    """Sum-reduce of the per-process slices; expected values hard-coded for 2 processes."""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def test_reduce_mean(state):
    """Mean-reduce of the per-process slices; expected values hard-coded for 2 processes."""
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs); `index` is the spawned process rank."""
    main()
def main():
    """Run every distributed-ops check in sequence on the current PartialState."""
    state = PartialState()
    state.print(f"State: {state}")
    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)
# Entry point when launched directly (e.g. via `accelerate launch`).
if __name__ == "__main__":
    main()
| 586
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """True if num/den is a non-trivial "digit-cancelling" fraction.

    i.e. the last digit of `num` equals the first digit of `den`, and naively
    cancelling that shared digit leaves an equal fraction (e.g. 49/98 -> 4/8).
    Caller must ensure den % 10 != 0 to avoid division by zero.
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    """Return all two-digit digit-cancelling fractions as 'num/den' strings.

    `digit_len` bounds the numerator search (numerators < 10**digit_len).
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            # den % 10 != 0 excludes trivial cancellations like 30/50 and
            # guards the division inside is_digit_cancelling.
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(digit_len: int = 2) -> int:
    """Project Euler 33: denominator of the product of the digit-cancelling
    fractions, in lowest terms (the product itself is 1/result)."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        # Accumulate the reciprocal so the final value is the denominator.
        result *= frac.denominator / frac.numerator
    return int(result)
# Print the Project Euler 33 answer when run as a script.
if __name__ == "__main__":
    print(solution())
| 586
| 1
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module extended with a 5-way answer-category head."""

    config: "BigBirdConfig"
    dtype: "jnp.dtype" = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # 5 answer categories for Natural Questions.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        # outputs[2] is the pooled representation fed to the category head.
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    """Model wrapper that plugs in the Natural Questions module."""

    module_class = FlaxBigBirdForNaturalQuestionsModule
def _snake_case ( __snake_case : Tuple , __snake_case : str , __snake_case : List[str] , __snake_case : Dict , __snake_case : Tuple , __snake_case : List[Any] ):
"""simple docstring"""
def cross_entropy(__snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[int]=None ):
_lowerCamelCase : Optional[int] = logits.shape[-1]
_lowerCamelCase : Any = (labels[..., None] == jnp.arange(__snake_case )[None]).astype("""f4""" )
_lowerCamelCase : Any = jax.nn.log_softmax(__snake_case , axis=-1 )
_lowerCamelCase : Any = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_lowerCamelCase : int = reduction(__snake_case )
return loss
_lowerCamelCase : int = partial(__snake_case , reduction=jnp.mean )
_lowerCamelCase : int = cross_entropy(__snake_case , __snake_case )
_lowerCamelCase : int = cross_entropy(__snake_case , __snake_case )
_lowerCamelCase : List[str] = cross_entropy(__snake_case , __snake_case )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    """Hyper-parameters and paths for BigBird Natural Questions fine-tuning."""

    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        # Checkpoints live under base_dir; global batch size scales with devices.
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class lowercase__ :
__UpperCAmelCase = 42
__UpperCAmelCase = 4096 # no dynamic padding on TPUs
def __call__( self , SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : Dict = self.collate_fn(SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
return batch
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase , _lowerCamelCase : Any = self.fetch_inputs(features["""input_ids"""])
_lowerCamelCase : Dict = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE , dtype=jnp.intaa),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE , dtype=jnp.intaa),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa),
}
return batch
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : List[str] = [self._fetch_inputs(SCREAMING_SNAKE_CASE) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict:
_lowerCamelCase : List[Any] = [1 for _ in range(len(SCREAMING_SNAKE_CASE))]
while len(SCREAMING_SNAKE_CASE) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    """Yield `dict` batches of `batch_size` rows; shuffles first when `seed` is given.

    The trailing partial batch (len(dataset) % batch_size rows) is dropped.
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    """One pmapped optimization step; returns (new_state, metrics, next dropout rng)."""

    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    # Average loss and gradients across devices before applying.
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    """One pmapped evaluation step (no dropout, no gradient); returns loss metrics."""
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    # loss_fn is static configuration, not a pytree leaf to be traced/replicated.
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    """Minimal training-loop driver for BigBird Natural Questions fine-tuning."""

    args: "Args"
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: "wandb"
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        """Build a device-replicated TrainState; optionally resume from `ckpt_dir`."""
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        """Epoch loop: train, periodically evaluate/log to wandb, and checkpoint."""
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            # Re-shuffle each epoch with the epoch index as seed.
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        """Mean validation loss over whole batches of `dataset`."""
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        """Persist model params, optimizer state, args and collator under `save_dir`."""
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    """Load params, optimizer state, step, args and collator saved by `save_checkpoint`."""
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        # from_bytes needs the existing pytrees as templates for deserialization.
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay to ~0 (1e-7)."""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def _snake_case(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer and its learning-rate schedule.

    Biases and LayerNorm scales are excluded from weight decay via a mask
    over the flattened parameter tree. (Fixes the obfuscated version:
    duplicate parameter names, the mask predicate testing the parameter
    value ``v`` instead of the flattened key ``k``, and the mask function
    never being passed to ``optax.adamw``.)

    Returns:
        (tx, lr): the optax gradient transformation and the schedule fn.
    """
    def weight_decay_mask(params):
        # flatten_dict keys are tuples of path components; decay everything
        # except biases and LayerNorm scale parameters.
        flat = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k, v in flat.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 88
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowercase ( unittest.TestCase ):
    # Integration tests for the BetterTransformer convert/revert/save cycle.
    # NOTE(review): ``SCREAMING_SNAKE_CASE_`` is never defined in this file,
    # and every local is rebound to the single name ``lowerCAmelCase`` — as
    # written these methods would raise NameError; both methods also share
    # the name ``UpperCAmelCase`` so the second overwrites the first.
    def UpperCAmelCase (self : Tuple ) -> Optional[int]:
        """Convert to BetterTransformer, revert, save, reload, and check the
        reloaded model generates identical output."""
        lowerCAmelCase = '''hf-internal-testing/tiny-random-t5'''
        lowerCAmelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = tokenizer('''This is me''' ,return_tensors='''pt''' )
        lowerCAmelCase = model.to_bettertransformer()
        self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        lowerCAmelCase = model.generate(**SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = model.reverse_bettertransformer()
        self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertFalse(
                any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            lowerCAmelCase = model_reloaded.generate(**SCREAMING_SNAKE_CASE_ )
            self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
    def UpperCAmelCase (self : str ) -> Tuple:
        """Saving a converted model without reverting must raise; after
        reverse_bettertransformer() the save succeeds."""
        lowerCAmelCase = '''hf-internal-testing/tiny-random-t5'''
        lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
                model.save_pretrained(SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase = model.reverse_bettertransformer()
            model.save_pretrained(SCREAMING_SNAKE_CASE_ )
| 535
| 0
|
from typing import List
from .keymap import KEYMAP, get_character
def __UpperCAmelCase ( UpperCAmelCase ):
    """Decorator factory: register one key on the decorated handler.

    The key (*UpperCAmelCase*) is appended to the function's ``handle_key``
    list attribute (created on first use); the function is returned
    unchanged so it can be stacked with other decorators.
    """
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [UpperCAmelCase]
        # Store the accumulated key list; the obfuscated original stored the
        # function object itself here, discarding the keys.
        setattr(func, "handle_key", handle)
        return func
    return decorator
def __UpperCAmelCase ( *UpperCAmelCase ):
    """Decorator factory: register several keys on the decorated handler.

    All positional arguments are appended to the function's ``handle_key``
    list attribute (created on first use); the function is returned
    unchanged.
    """
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += UpperCAmelCase
        # Store the accumulated key list; the obfuscated original stored the
        # function object itself here, discarding the keys.
        setattr(func, "handle_key", handle)
        return func
    return decorator
class __lowercase ( _A ):
    # Metaclass intended to collect @mark-decorated methods into a
    # ``key_handler`` dict on each new class and attach a shared
    # ``handle_input`` dispatcher.
    # NOTE(review): obfuscation damage — the three ``__lowerCamelCase``
    # parameters of __new__ are duplicates (a SyntaxError), ``KeyHandler``,
    # ``attrs``, ``handled_keys``, ``new_cls`` and the base class ``_A`` are
    # unbound here, and every local is rebound to the name ``lowercase``.
    def __new__( cls : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int ) -> int:
        '''Create the class, seed ``key_handler``/``handle_input``, and index
        the decorated handlers by the keys they registered.'''
        lowercase = super().__new__(cls , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        if not hasattr(__lowerCamelCase , '''key_handler''' ):
            setattr(__lowerCamelCase , '''key_handler''' , {} )
        setattr(__lowerCamelCase , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            lowercase = getattr(__lowerCamelCase , '''handle_key''' , [] )
            for key in handled_keys:
                lowercase = value
        return new_cls
    @staticmethod
    def __a ( cls : Optional[int] ) -> List[str]:
        '''Read one key press and dispatch to the registered handler, if any;
        return the handler's result, or None for unregistered keys.'''
        lowercase = get_character()
        if char != KEYMAP["undefined"]:
            lowercase = ord(__lowerCamelCase )
        lowercase = cls.key_handler.get(__lowerCamelCase )
        if handler:
            lowercase = char
            return handler(cls )
        else:
            return None
def __UpperCAmelCase ( cls )-> int:
    """Rebuild *cls* through the key-handler metaclass so its decorated
    methods become active input handlers.

    NOTE(review): ``KeyHandler`` is unbound in this file — after obfuscation
    the metaclass above is named ``__lowercase``.
    """
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 479
|
from __future__ import annotations
from collections import deque
class __lowercase :
    # Aho-Corasick automaton over a list of keywords: a trie with failure
    # links that reports every occurrence of every keyword in a text.
    # NOTE(review): obfuscation damage — all four methods below share the
    # mangled name ``__a`` (each definition overwrites the previous one), and
    # results are bound to the throwaway local ``lowercase`` instead of the
    # names the code later reads (``self.adlist``, ``current_state``,
    # ``next_state``, ``state``, ``r``, ``q``, ``result``...), so the class
    # cannot run as-is.
    def __init__( self : Dict , __lowerCamelCase : list[str] ) -> List[str]:
        '''Build the automaton: root node, one trie path per keyword, then
        the BFS failure transitions.'''
        lowercase = []
        self.adlist.append(
            {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
        for keyword in keywords:
            self.add_keyword(__lowerCamelCase )
        self.set_fail_transitions()
    def __a ( self : str , __lowerCamelCase : int , __lowerCamelCase : str ) -> int | None:
        '''Return the child of the current state whose value equals the given
        character, or None when no such transition exists.'''
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def __a ( self : List[Any] , __lowerCamelCase : str ) -> None:
        '''Insert one keyword into the trie, creating nodes as needed, and
        record the keyword in the terminal node's output list.'''
        lowercase = 0
        for character in keyword:
            lowercase = self.find_next_state(__lowerCamelCase , __lowerCamelCase )
            if next_state is None:
                self.adlist.append(
                    {
                        '''value''': character,
                        '''next_states''': [],
                        '''fail_state''': 0,
                        '''output''': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                lowercase = len(self.adlist ) - 1
            else:
                lowercase = next_state
        self.adlist[current_state]["output"].append(__lowerCamelCase )
    def __a ( self : int ) -> None:
        '''BFS from the root assigning each node its failure state and merging
        the failure state's output into the node's output list.'''
        lowercase = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(__lowerCamelCase )
        lowercase = 0
        while q:
            lowercase = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(__lowerCamelCase )
                lowercase = self.adlist[r]['''fail_state''']
                # Walk failure links until a state with a matching transition
                # (or the root) is found.
                while (
                    self.find_next_state(__lowerCamelCase , self.adlist[child]['''value'''] ) is None
                    and state != 0
                ):
                    lowercase = self.adlist[state]['''fail_state''']
                lowercase = self.find_next_state(
                    __lowerCamelCase , self.adlist[child]['''value'''] )
                if self.adlist[child]["fail_state"] is None:
                    lowercase = 0
                lowercase = (
                    self.adlist[child]['''output''']
                    + self.adlist[self.adlist[child]['''fail_state''']]['''output''']
                )
    def __a ( self : List[str] , __lowerCamelCase : str ) -> dict[str, list[int]]:
        '''Scan the text through the automaton and return a mapping from each
        found keyword to the list of its start indices.'''
        lowercase = {} # returns a dict with keywords and list of its occurrences
        lowercase = 0
        for i in range(len(__lowerCamelCase ) ):
            while (
                self.find_next_state(__lowerCamelCase , string[i] ) is None
                and current_state != 0
            ):
                lowercase = self.adlist[current_state]['''fail_state''']
            lowercase = self.find_next_state(__lowerCamelCase , string[i] )
            if next_state is None:
                lowercase = 0
            else:
                lowercase = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    lowercase = []
                result[key].append(i - len(__lowerCamelCase ) + 1 )
        return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 479
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
# Map of released ConvBERT checkpoints to their hosted config files.
# NOTE(review): this dict is bound to the same name ``lowerCAmelCase`` as the
# logger above, so it shadows (replaces) the logger binding.
lowerCAmelCase = {
    """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
    """YituTech/conv-bert-medium-small""": (
        """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
    ),
    """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase ( _UpperCamelCase ):
    # ConvBERT model configuration: standard BERT-style sizes plus the
    # ConvBERT-specific head_ratio / conv_kernel_size / num_groups knobs.
    # NOTE(review): every __init__ parameter is named ``lowercase__`` —
    # duplicate argument names are a SyntaxError — and the attribute
    # assignments read the upstream names (``vocab_size``, ``hidden_size``,
    # ...), which are unbound here.
    _lowerCAmelCase : Optional[Any] = '''convbert'''
    def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=1 , lowercase__=0 , lowercase__=2 , lowercase__=7_6_8 , lowercase__=2 , lowercase__=9 , lowercase__=1 , lowercase__=None , **lowercase__ , ):
        """Store the configuration values and forward the special token ids
        to the base PretrainedConfig."""
        super().__init__(
            pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ , )
        __UpperCAmelCase : List[Any] = vocab_size
        __UpperCAmelCase : Tuple = hidden_size
        __UpperCAmelCase : List[Any] = num_hidden_layers
        __UpperCAmelCase : Optional[Any] = num_attention_heads
        __UpperCAmelCase : str = intermediate_size
        __UpperCAmelCase : int = hidden_act
        __UpperCAmelCase : Any = hidden_dropout_prob
        __UpperCAmelCase : int = attention_probs_dropout_prob
        __UpperCAmelCase : List[Any] = max_position_embeddings
        __UpperCAmelCase : List[Any] = type_vocab_size
        __UpperCAmelCase : Tuple = initializer_range
        __UpperCAmelCase : Any = layer_norm_eps
        __UpperCAmelCase : Tuple = embedding_size
        __UpperCAmelCase : int = head_ratio
        __UpperCAmelCase : Union[str, Any] = conv_kernel_size
        __UpperCAmelCase : Dict = num_groups
        __UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
    # ONNX export configuration: declares the dynamic axes of ConvBERT inputs.
    @property
    def A( self):
        """Return the ONNX input spec; axis 1 is ``choice`` for
        multiple-choice tasks, otherwise ``sequence``."""
        if self.task == "multiple-choice":
            __UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            __UpperCAmelCase : int = {0: '''batch''', 1: '''sequence'''}
        # NOTE(review): ``dynamic_axis`` is unbound — the dicts above were
        # assigned to ``__UpperCAmelCase`` by the obfuscation.
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ])
| 462
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCAmelCase = datasets.logging.get_logger(__name__)
# BibTeX citations for the metrics CoVal implements.
# NOTE(review): upstream these three strings are the distinct module
# constants _CITATION, _DESCRIPTION and _KWARGS_DESCRIPTION; the obfuscation
# bound them (and the logger above) to the single name ``lowerCAmelCase``,
# so each assignment shadows the previous one and the Metric class below
# reads unbound names.
lowerCAmelCase = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
# Human-readable description of the CoVal wrapper and the CoNLL line format
# it expects.
lowerCAmelCase = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
# Usage/arguments/returns documentation surfaced through datasets' metric
# docstring machinery.
lowerCAmelCase = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=False , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_="dummy_doc" ) -> Dict:
    '''Build per-document coreference info (key/system clusters plus mention
    alignments) from CoNLL lines, optionally restricting to NPs / minimum
    spans, removing nested mentions, and dropping singletons.

    NOTE(review): all seven parameters share the name ``lowercase_`` (a
    SyntaxError), and the body reads the upstream names (``key_lines``,
    ``sys_lines``, ``doc``, ``NP_only``, ``min_span``, ``remove_nested``,
    ``keep_singletons``, the counters, ``doc_coref_infos``...), which are
    unbound here; results are bound to the throwaway ``__UpperCAmelCase``.
    '''
    __UpperCAmelCase : List[str] = {doc: key_lines}
    __UpperCAmelCase : Tuple = {doc: sys_lines}
    __UpperCAmelCase : Union[str, Any] = {}
    __UpperCAmelCase : Union[str, Any] = 0
    __UpperCAmelCase : str = 0
    __UpperCAmelCase : int = 0
    __UpperCAmelCase : int = 0
    __UpperCAmelCase : Union[str, Any] = 0
    __UpperCAmelCase : Any = 0
    __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = reader.get_doc_mentions(lowercase_ , key_doc_lines[doc] , lowercase_ )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        __UpperCAmelCase : List[Any] = reader.set_annotated_parse_trees(lowercase_ , key_doc_lines[doc] , lowercase_ , lowercase_ )
    __UpperCAmelCase , __UpperCAmelCase : List[Any] = reader.get_doc_mentions(lowercase_ , sys_doc_lines[doc] , lowercase_ )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        __UpperCAmelCase : Union[str, Any] = reader.set_annotated_parse_trees(lowercase_ , key_doc_lines[doc] , lowercase_ , lowercase_ )
    if remove_nested:
        __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = reader.remove_nested_coref_mentions(lowercase_ , lowercase_ )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        __UpperCAmelCase , __UpperCAmelCase : str = reader.remove_nested_coref_mentions(lowercase_ , lowercase_ )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    __UpperCAmelCase : List[Any] = reader.get_mention_assignments(lowercase_ , lowercase_ )
    __UpperCAmelCase : Optional[Any] = reader.get_mention_assignments(lowercase_ , lowercase_ )
    __UpperCAmelCase : Dict = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            '''files, respectively''' )
    return doc_coref_infos
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
    '''Score key vs. system coreference with each requested metric and return
    a dict of recall/precision/F1 per metric, plus the averaged CoNLL score
    (the mean of the MUC, B-cubed and CEAFe F1 values).

    NOTE(review): the seven duplicate ``lowercase_`` parameters are a
    SyntaxError, and the body reads unbound upstream names (``metrics``,
    ``output_scores``, ``recall``, ``precision``, ``fa``, ``conll``,
    ``conll_subparts_num``, ``get_coref_infos``...).
    '''
    __UpperCAmelCase : List[Any] = get_coref_infos(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
    __UpperCAmelCase : Union[str, Any] = {}
    __UpperCAmelCase : Union[str, Any] = 0
    __UpperCAmelCase : str = 0
    for name, metric in metrics:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = evaluator.evaluate_documents(lowercase_ , lowercase_ , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
        logger.info(
            name.ljust(10 ) , f"Recall: {recall * 100:.2f}" , f" Precision: {precision * 100:.2f}" , f" F1: {fa * 100:.2f}" , )
    if conll_subparts_num == 3:
        __UpperCAmelCase : Optional[int] = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}" )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
__UpperCAmelCase : List[Any] = line.split()[5]
if not parse_col == "-":
__UpperCAmelCase : Optional[Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
    # datasets.Metric wrapper around the CoVal coreference scorer.
    # NOTE(review): ``_DESCRIPTION``/``_CITATION``/``_KWARGS_DESCRIPTION`` are
    # unbound (the module constants were all renamed ``lowerCAmelCase``), the
    # second ``A`` method has duplicate ``lowercase__`` parameters (a
    # SyntaxError), and its body reads unbound names (``min_span``,
    # ``has_gold_parse``, ``evaluate``, ``score``...).
    def A( self):
        """Declare the metric's input schema, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''')),
                    '''references''': datasets.Sequence(datasets.Value('''string''')),
                }) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )
    def A( self , lowercase__ , lowercase__ , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False):
        """Compute the CoVal scores (MUC, B-cubed, CEAFe, LEA, mentions) for
        predictions vs. references; min_span requires gold parse columns."""
        __UpperCAmelCase : List[Any] = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            __UpperCAmelCase : Optional[Any] = util.check_gold_parse_annotation(lowercase__)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        __UpperCAmelCase : Tuple = evaluate(
            key_lines=lowercase__ , sys_lines=lowercase__ , metrics=lowercase__ , NP_only=lowercase__ , remove_nested=lowercase__ , keep_singletons=lowercase__ , min_span=lowercase__ , )
        return score
| 462
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module import structure for the WavLM sub-package: config symbols are
# always exposed; modeling symbols only when torch is installed.
SCREAMING_SNAKE_CASE__ = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply don't register the modeling symbols.
    pass
else:
    # NOTE(review): upstream extends _import_structure["modeling_wavlm"];
    # here the obfuscation rebinds SCREAMING_SNAKE_CASE__, clobbering the
    # dict above, and the _LazyModule call at the bottom reads the unbound
    # name ``_import_structure``.
    SCREAMING_SNAKE_CASE__ = [
        '''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''WavLMForAudioFrameClassification''',
        '''WavLMForCTC''',
        '''WavLMForSequenceClassification''',
        '''WavLMForXVector''',
        '''WavLMModel''',
        '''WavLMPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52
|
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCAmelCase ( UpperCAmelCase_ ):
    """Feature-extraction pipeline: tokenize → forward pass → return the
    model's first output as (nested) lists, or as the raw tensor.

    NOTE(review): obfuscation damage — the four pipeline hooks below all
    share the name ``_a`` (each definition overwrites the previous one),
    ``_sanitize_parameters`` has duplicate ``_snake_case`` parameters (a
    SyntaxError), and several bodies read unbound upstream names
    (``tokenize_kwargs``, ``truncation``, ``return_tensors``,
    ``preprocess_params``, ``postprocess_params``, ``model_inputs``,
    ``model_outputs``); the base class ``UpperCAmelCase_`` is undefined.
    """
    def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ):
        """Split pipeline kwargs into preprocess/forward/postprocess dicts;
        rejects ``truncation`` given both directly and in tokenize_kwargs."""
        if tokenize_kwargs is None:
            A__ = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            A__ = truncation
        A__ = tokenize_kwargs
        A__ = {}
        if return_tensors is not None:
            A__ = return_tensors
        return preprocess_params, {}, postprocess_params
    def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ):
        """Tokenize the input text as tensors for the pipeline's framework."""
        A__ = self.framework
        A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
        return model_inputs
    def _a ( self : List[Any] , _snake_case : Dict ):
        """Run the model forward pass on the tokenized inputs."""
        A__ = self.model(**_snake_case )
        return model_outputs
    def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ):
        """Convert the first model output to nested lists (or return the raw
        tensor when return_tensors is truthy)."""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
        """Delegate to the base Pipeline call."""
        return super().__call__(*_snake_case , **_snake_case )
| 52
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=A ):
    # Placeholder object for components that require both torch and scipy:
    # any construction or from_* call raises an informative ImportError via
    # requires_backends.
    # NOTE(review): the metaclass name ``A`` is unbound in this file
    # (upstream it is DummyObject), and both classmethods share the name
    # ``snake_case`` so the second overwrites the first.
    lowerCAmelCase_ = ["torch", "scipy"]
    def __init__( self : Any , *__lowercase : Optional[Any] , **__lowercase : Dict ):
        """Raise unless torch and scipy are installed."""
        requires_backends(self , ['torch', 'scipy'] )
    @classmethod
    def snake_case ( cls : Optional[Any] , *__lowercase : Optional[int] , **__lowercase : List[str] ):
        """Raise unless torch and scipy are installed."""
        requires_backends(cls , ['torch', 'scipy'] )
    @classmethod
    def snake_case ( cls : Union[str, Any] , *__lowercase : List[Any] , **__lowercase : Tuple ):
        """Raise unless torch and scipy are installed."""
        requires_backends(cls , ['torch', 'scipy'] )
| 119
|
'''simple docstring'''
def __UpperCamelCase ( lowercase__ : list[int] ):
    """Return the mean absolute deviation of the numbers in *lowercase__*.

    (Fixes the obfuscated version, which tested the unbound name ``nums``
    and read the unbound name ``average`` after binding the mean to a
    throwaway local.)

    Raises:
        ValueError: if the list is empty.
    """
    if not lowercase__:  # Makes sure that the list is not empty
        raise ValueError('List is empty')
    average = sum(lowercase__) / len(lowercase__)  # Calculate the average
    return sum(abs(x - average) for x in lowercase__) / len(lowercase__)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 119
| 1
|
"""simple docstring"""
def A( snake_case_ ):
    """Return True if the decimal representation of *snake_case_* reads the
    same forwards and backwards."""
    digits = str(snake_case_)
    return digits == digits[::-1]
def A( snake_case_ ):
    """Return *snake_case_* plus the number formed by reversing its decimal
    digits."""
    reversed_digits = str(snake_case_)[::-1]
    return int(snake_case_) + int(reversed_digits)
def _is_palindrome(n):
    # Helper: True if n's decimal digits read the same both ways.
    s = str(n)
    return s == s[::-1]


def _sum_reverse(n):
    # Helper: n plus the number formed by reversing its digits.
    return int(n) + int(str(n)[::-1])


def A( snake_case_ = 10000 ):
    """Project Euler 55: count candidate Lychrel numbers below *snake_case_*.

    A number is a Lychrel candidate if repeatedly adding it to its digit
    reversal never yields a palindrome within 50 iterations.

    (Fixes the obfuscated version, which called the undefined names
    ``sum_reverse``/``is_palindrome``, read the unbound loop counter
    ``iterations``, and appended the limit instead of the candidate.)
    """
    lychrel_nums = []
    for num in range(1, snake_case_):
        iterations = 0
        candidate = num
        while iterations < 50:
            candidate = _sum_reverse(candidate)
            iterations += 1
            if _is_palindrome(candidate):
                break
        else:
            # 50 iterations without reaching a palindrome: Lychrel candidate.
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    # Print the Project Euler 55 answer when run as a script. (The original
    # called ``solution()``, which is undefined here — the solver above is
    # named ``A`` after obfuscation.)
    print(F"{A() = }")
| 120
|
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class _a ( lowercase_ , unittest.TestCase ):
    """Flax model-tester suite for FlaxAutoencoderKL.

    NOTE(review): the mixin base ``lowercase_`` and the ``UpperCAmelCase_``
    argument passed to jax.random.uniform are unbound in this file, the
    locals are all rebound to ``lowercase__``, and both methods share the
    name ``__lowercase`` so the second overwrites the first.
    """
    UpperCamelCase__ = FlaxAutoencoderKL
    @property
    def __lowercase ( self) -> Dict:
        """Random (4, 3, 32, 32)-shaped sample plus a PRNG key for the model
        call. TODO(review): confirm the intended shape against the mixin."""
        lowercase__: Dict = 4
        lowercase__: str = 3
        lowercase__: List[Any] = (32, 32)
        lowercase__: str = jax.random.PRNGKey(0)
        lowercase__: Tuple = jax.random.uniform(UpperCAmelCase_ , ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}
    def __lowercase ( self) -> List[Any]:
        """Minimal VAE config and the matching dummy inputs."""
        lowercase__: Any = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        lowercase__: Any = self.dummy_input
        return init_dict, inputs_dict
| 120
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase : int = ["""pixel_values"""]
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BICUBIC , _A = True , _A = None , _A = True , _A = 1 / 2_5_5 , _A = True , _A = None , _A = None , _A = True , **_A , ):
'''simple docstring'''
super().__init__(**_A )
_SCREAMING_SNAKE_CASE =size if size is not None else {"""shortest_edge""": 2_2_4}
_SCREAMING_SNAKE_CASE =get_size_dict(_A , default_to_square=_A )
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
_SCREAMING_SNAKE_CASE =get_size_dict(_A , default_to_square=_A , param_name='''crop_size''' )
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =resample
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_rescale
_SCREAMING_SNAKE_CASE =rescale_factor
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_SCREAMING_SNAKE_CASE =image_std if image_std is not None else OPENAI_CLIP_STD
_SCREAMING_SNAKE_CASE =do_convert_rgb
def UpperCamelCase_ ( self , _A , _A , _A = PILImageResampling.BICUBIC , _A = None , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_SCREAMING_SNAKE_CASE =get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self , _A , _A , _A = None , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def UpperCamelCase_ ( self , _A , _A , _A = None , **_A , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self , _A , _A , _A , _A = None , **_A , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE =size if size is not None else self.size
_SCREAMING_SNAKE_CASE =get_size_dict(_A , param_name='''size''' , default_to_square=_A )
_SCREAMING_SNAKE_CASE =resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE =do_center_crop if do_center_crop is not None else self.do_center_crop
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else self.crop_size
_SCREAMING_SNAKE_CASE =get_size_dict(_A , param_name='''crop_size''' , default_to_square=_A )
_SCREAMING_SNAKE_CASE =do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE =rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE =do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE =image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_SCREAMING_SNAKE_CASE =make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_SCREAMING_SNAKE_CASE =[convert_to_rgb(_A ) for image in images]
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE =[to_numpy_array(_A ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE =[self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
_SCREAMING_SNAKE_CASE =[self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE =[self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE =[self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_SCREAMING_SNAKE_CASE =[to_channel_dimension_format(_A , _A ) for image in images]
_SCREAMING_SNAKE_CASE ={"""pixel_values""": images}
return BatchFeature(data=_A , tensor_type=_A )
| 255
|
import pytest
# Name of the dummy dataset loading script exposed by the fixtures below.
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

# Source of a minimal `datasets` loading script; written to disk by the
# `dataset_loading_script_dir` fixture.
DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets

REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}

class __DummyDataset1__(datasets.GeneratorBasedBuilder):
    def _info(self):
        features = datasets.Features(
            {
                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),
                \"ner_tags\": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            \"O\",
                            \"B-PER\",
                            \"I-PER\",
                            \"B-ORG\",
                            \"I-ORG\",
                            \"B-LOC\",
                            \"I-LOC\",
                        ]
                    )
                ),
                \"langs\": datasets.Sequence(datasets.Value(\"string\")),
                \"spans\": datasets.Sequence(datasets.Value(\"string\")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, \"r\", encoding=\"utf-8\") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    """Name of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    """Source code of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """Write the dummy loading script under ``tmp_path/datasets/<name>/<name>.py``
    and return the script directory as a string."""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    # NOTE(review): returning the directory (not the file) — confirm against callers.
    return str(script_dir)
| 681
| 0
|
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__ ( lowercase__ ):
    """Processor that wraps an auto image processor and an auto tokenizer into a
    single object (Donut-style), and can convert a generated token sequence back
    into a JSON-like structure via `tokenajson`.
    """

    # Names expected by the ProcessorMixin machinery.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        # Fall back to the deprecated `feature_extractor` when no image processor is given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Dispatch to the image processor and/or tokenizer; when both are used,
        the tokenized `input_ids` are attached as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop('images', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route `__call__` to the tokenizer for label processing."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token string like ``<s_key>value</s_key>`` into a
        dict / list-of-dicts JSON structure.

        Args:
            tokens: generated token string.
            is_inner_value: True on recursive calls; changes the empty/neutral return shape.
            added_vocab: added-token vocabulary; fetched from the tokenizer when None.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r'<s_(.*?)>', tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unbalanced start token: drop it and keep scanning.
                tokens = tokens.replace(start_token, '')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r'<sep/>'):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                # Continue after the closing token; a following <sep/> starts a sibling node.
                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 319
|
'''simple docstring'''
import math
class UpperCAmelCase__ :
    """All-pairs shortest paths on a dense weighted digraph (Floyd-Warshall)."""

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # Adjacency matrix for edge weights (kept to preserve the original attribute surface).
        self.w = [[math.inf for _ in range(0, n)] for _ in range(0, n)]
        # dp[i][j] stores the minimum distance from i to j.
        self.dp = [[math.inf for _ in range(0, n)] for _ in range(0, n)]
        for i in range(0, n):
            self.dp[i][i] = 0  # distance from a node to itself is 0

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax all pairs through every intermediate node k; O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest distance from u to v (math.inf if unreachable)."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Small demo: build the example graph, run Floyd-Warshall, and print two distances.
    graph = UpperCAmelCase__(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 319
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

# File names and download locations used by the fast SqueezeBERT tokenizer below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input sizes (in tokens) for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

# Per-checkpoint constructor overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class snake_case__ ( __A ):
    """Fast (Rust-backed) SqueezeBERT tokenizer; WordPiece, behaviorally identical
    to BertTokenizerFast."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer when constructor options differ from the
        # state serialized in tokenizer.json.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] for sequence pairs)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """0s over the first sequence (incl. specials), 1s over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend model files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 419
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

# Maximum input sizes (in tokens) for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class snake_case__ ( __A ):
    """SentencePiece-based XLM-RoBERTa tokenizer with fairseq-compatible token ids."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in fairseq and 3 in spm, hence offset 1.
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for single sequences; <s> A </s></s> B </s> for pairs."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """XLM-R does not use token types: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, applying the fairseq offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), undoing the fairseq offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join pieces and replace the SentencePiece underline ("▁") by spaces."""
        out_string = "".join(tokens).replace("▁", " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 419
| 1
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# Template URL for fetching community pipelines straight from the diffusers GitHub repo.
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all released `diffusers` version strings from PyPI, sorted ascending."""
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda v: version.Version(v))
def init_hf_modules():
    """Create the HF dynamic-modules cache directory (as a package) and add it to sys.path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create (recursively) the dynamic module package `name` under the HF modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the unique module names imported relatively by `module_file`."""
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Return every file transitively reachable from `module_file` through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        # Drop files already collected to guarantee termination on import cycles.
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"""{f}.py""" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Raise ImportError if any top-level package imported by `filename` is missing
    from the environment; return the file's relative imports."""
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module of absolute imports.
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            f"""{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`""" )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import `module_path` (path-with-separators or dotted name) and return `class_name`
    from it; when `class_name` is None, auto-detect the pipeline class."""
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Return the unique DiffusionPipeline subclass defined (outside diffusers) in
    `loaded_module`; raises ValueError if more than one is found."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}.""" )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Locate or download `module_file` (a local file, a GitHub community pipeline, or a
    Hub repo file), copy it into the dynamic-modules cache, and return its path relative
    to that cache.

    NOTE(review): variable roles reconstructed from the surviving call sites; confirm
    against the upstream `diffusers` dynamic-modules implementation.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        # A bare name refers to a community pipeline hosted on the diffusers GitHub repo.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(f"""Defaulting to latest_version: {revision}.""")
        elif revision in available_versions:
            revision = f"""v{revision}"""
        elif revision == "main":
            pass  # use the tip of the main branch as-is
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {', '.join(available_versions + ['main'])}.""" )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,  # NOTE(review): GitHub raw download needs no HF token — confirm
            )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"""{module_needed}.py""",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Fetch `module_file` into the dynamic-modules cache and return `class_name` from it
    (auto-detected pipeline class when None)."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
| 714
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __UpperCAmelCase( A__ ):
    """Dataset reader that builds a `datasets` Dataset (or streaming dataset) from a
    PySpark DataFrame."""

    def __init__(
        self,
        df,
        split=None,
        features=None,
        streaming=True,
        cache_dir=None,
        keep_in_memory=False,
        working_dir=None,
        load_from_cache_file=True,
        file_format="arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return a streaming dataset, or prepare and materialize the split."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download when the caller opted out of the cache.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 236
| 0
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCamelCase ( A__ ):
    """CLIP-vision-based image encoder that maps pooled CLIP features into a projected
    latent (plus a learned unconditional vector) for Paint-by-Example guidance.

    NOTE(review): the original bound every value to the throwaway local
    ``_UpperCAmelCase`` while later code read ``self.proj_size`` / ``self.model`` /
    ``self.mapper`` / ``self.final_layer_norm`` / ``self.proj_out`` /
    ``self.uncond_vector``, and the forward signature duplicated the parameter name
    ``_UpperCamelCase`` (a SyntaxError); attribute and parameter names were
    reconstructed from those reads.
    """

    def __init__( self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        # NOTE(review): ``PaintByExampleMapper`` is not defined under that name in this
        # file (the mapper class below appears renamed) — confirm the intended reference.
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def UpperCamelCase( self , pixel_values , return_uncond_vector=False ):
        """Encode ``pixel_values``; optionally also return the unconditional vector."""
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        # Add a sequence dimension before mapping: (batch, 1, hidden).
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class __UpperCamelCase ( nn.Module ):
    """Sequential stack of transformer blocks mapping CLIP latents for Paint-by-Example.

    NOTE(review): the original bound the block list to a throwaway local while the
    forward pass read ``self.blocks``, and the per-block output was discarded;
    reconstructed below.  The second/third ``BasicTransformerBlock`` arguments
    (heads, per-head dim) are assumptions from the visible ``num_heads = 1`` binding —
    confirm against the library's convention.
    """

    def __init__( self , config ):
        super().__init__()
        # One block per ~5 hidden layers of the underlying config.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='''gelu''' , attention_bias=True )
                for _ in range(num_layers )
            ] )

    def UpperCamelCase( self , hidden_states ):
        """Pass ``hidden_states`` through each transformer block in sequence."""
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
| 32
|
"""simple docstring"""
def A__ ( __lowerCamelCase ):
    """Return True iff the singly linked list starting at ``__lowerCamelCase`` is a palindrome.

    Splits the list at the midpoint, reverses the second half in place, then compares
    the two halves node by node.  O(n) time, O(1) extra space.  Note: this mutates the
    input list (the second half stays reversed).

    NOTE(review): the original bound every step to the throwaway local
    ``_lowerCAmelCase``; variable names were reconstructed from the algorithm's
    structure and the surrounding comments.
    """
    head = __lowerCamelCase
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def A__ ( __lowerCamelCase ):
    """Return True iff the linked list is a palindrome, using an explicit stack.

    Pushes the values of the second half onto a stack, then pops while walking the
    list from its head.  O(n) time, O(n) extra space; does not mutate the list.

    NOTE(review): reconstructed from the original's throwaway ``_lowerCAmelCase``
    bindings and the names (``stack``, ``slow``, ``cur``) the body already read.
    """
    head = __lowerCamelCase
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def A__ ( __lowerCamelCase ):
    """Return True iff the linked list is a palindrome, using a value -> positions map.

    For a palindrome of length L, the recorded positions of each value pair up so
    that ``first + mirrored == L - 1``, and at most one value (the middle of an
    odd-length list) may occur an odd number of times.

    NOTE(review): reconstructed from the original's throwaway ``_lowerCAmelCase``
    bindings and the names (``d``, ``pos``, ``checksum``, ``middle``, ``step``)
    the body already read.
    """
    head = __lowerCamelCase
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    # Index of the last node; mirrored position pairs must sum to this.
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
    # More than one value with an odd count cannot form a palindrome.
    if middle > 1:
        return False
    return True
| 589
| 0
|
'''simple docstring'''
lowerCamelCase_ : Tuple = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase( __lowerCamelCase ):
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm.

    Supports single-digit operands and the operators ``+ - * /``; every
    sub-expression must be wrapped in parentheses, e.g. ``"(5 + ((4 * 2) * (2 + 3)))"``.

    NOTE(review): the original body pushed/popped the undefined name ``A__`` and bound
    every intermediate to the throwaway ``__a``; reconstructed below.
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in __lowerCamelCase:
        if i.isdigit():
            # RULE 1: operands go straight onto the operand stack.
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: operators go onto the operator stack.
            operator_stack.push(i )
        elif i == ")":
            # RULE 4: a closing parenthesis applies the top operator to the
            # top two operands and pushes the result back.
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            # The second value popped is the left-hand operand (matters for - and /).
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5: the value left on the operand stack is the result.
    return operand_stack.peek()
if __name__ == "__main__":
    lowerCamelCase_ : Dict = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    # NOTE(review): the original printed via the undefined names ``equation`` and
    # ``dijkstras_two_stack_algorithm``; this calls the evaluator actually defined above.
    print(F'''{lowerCamelCase_} = {lowerCAmelCase(lowerCamelCase_)}''')
| 714
|
# Mapping of dependency name -> pip version specifier.  The values are consumed
# verbatim as pip requirement strings (setup extras / runtime version checks).
lowerCamelCase_ : Optional[Any] = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
| 246
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding for the LayoutLMv3 model family: submodule -> public names.
# NOTE(review): the original rebound one throwaway name (``__A``) for every group and
# then passed the undefined ``_import_structure`` to ``_LazyModule``, and its
# TYPE_CHECKING imports used garbled module/class names (``layoutlmva`` /
# ``LayoutLMva*``) that do not match the declared structure; rebuilt below so the
# declared structure is what the lazy module actually exposes.
_import_structure = {
    """configuration_layoutlmv3""": [
        """LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """LayoutLMv3Config""",
        """LayoutLMv3OnnxConfig""",
    ],
    """processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
    """tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
# Fast (tokenizers-backed) tokenizer is optional.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_layoutlmv3_fast"""] = ["""LayoutLMv3TokenizerFast"""]
# PyTorch models are optional.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_layoutlmv3"""] = [
        """LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LayoutLMv3ForQuestionAnswering""",
        """LayoutLMv3ForSequenceClassification""",
        """LayoutLMv3ForTokenClassification""",
        """LayoutLMv3Model""",
        """LayoutLMv3PreTrainedModel""",
    ]
# TensorFlow models are optional.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_layoutlmv3"""] = [
        """TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFLayoutLMv3ForQuestionAnswering""",
        """TFLayoutLMv3ForSequenceClassification""",
        """TFLayoutLMv3ForTokenClassification""",
        """TFLayoutLMv3Model""",
        """TFLayoutLMv3PreTrainedModel""",
    ]
# Vision-based preprocessing is optional.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_layoutlmv3"""] = ["""LayoutLMv3FeatureExtractor"""]
    _import_structure["""image_processing_layoutlmv3"""] = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
    # Static type-checkers see the real imports; modules/names mirror _import_structure.
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Unit tests for the OwlViT processor (CLIP tokenizer + OwlViT image processor).

    NOTE(review): throughout this class, values are bound to the throwaway local
    ``__UpperCAmelCase`` while later statements read named attributes
    (``self.tmpdirname``, ``self.vocab_file``, ``self.merges_file``,
    ``self.image_processor_file``) and named locals (``tokenizer_slow``,
    ``processor``, ``inputs``, ...) that are never assigned — the original bindings
    appear to have been lost, so these tests cannot run as written; confirm against
    the upstream test file.  Every method is also named ``__lowerCamelCase``
    (name-mangled and mutually shadowing); the original unittest names
    (``setUp``, ``tearDown``, ``test_*``) appear lost as well.
    """

    # setUp-style fixture: writes a tiny CLIP vocab/merges pair and an
    # image-processor config JSON into a fresh temporary directory.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = tempfile.mkdtemp()
        # fmt: off
        __UpperCAmelCase = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        __UpperCAmelCase = dict(zip(__A , range(len(__A ) ) ) )
        __UpperCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        __UpperCAmelCase = {'unk_token': '<unk>'}
        __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(__A ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(__A ) )
        __UpperCAmelCase = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        __UpperCAmelCase = os.path.join(self.tmpdirname , __A )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(__A , __A )

    # Helper: slow CLIP tokenizer loaded from the fixture directory.
    def __lowerCamelCase ( self , **__A ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **__A )

    # Helper: fast (Rust) CLIP tokenizer loaded from the fixture directory.
    def __lowerCamelCase ( self , **__A ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **__A )

    # Helper: OwlViT image processor loaded from the fixture directory.
    def __lowerCamelCase ( self , **__A ):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )

    # tearDown-style cleanup of the temporary fixture directory.
    def __lowerCamelCase ( self ):
        shutil.rmtree(self.tmpdirname )

    # Helper: one random RGB PIL image (channels-first array moved to channels-last).
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __UpperCAmelCase = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    # Save/load round-trip: default components survive save_pretrained/from_pretrained.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = self.get_tokenizer()
        __UpperCAmelCase = self.get_rust_tokenizer()
        __UpperCAmelCase = self.get_image_processor()
        __UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        processor_slow.save_pretrained(self.tmpdirname )
        __UpperCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
        __UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        processor_fast.save_pretrained(self.tmpdirname )
        __UpperCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __A )
        self.assertIsInstance(processor_fast.tokenizer , __A )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __A )
        self.assertIsInstance(processor_fast.image_processor , __A )

    # Save/load round-trip with extra kwargs (special tokens, do_normalize override).
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        __UpperCAmelCase = self.get_image_processor(do_normalize=__A )
        __UpperCAmelCase = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__A )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __A )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __A )

    # Processor(images=...) should match the raw image processor's output.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = self.get_image_processor()
        __UpperCAmelCase = self.get_tokenizer()
        __UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        __UpperCAmelCase = self.prepare_image_inputs()
        __UpperCAmelCase = image_processor(__A , return_tensors='np' )
        __UpperCAmelCase = processor(images=__A , return_tensors='np' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    # Processor(text=...) should match the raw tokenizer's output.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = self.get_image_processor()
        __UpperCAmelCase = self.get_tokenizer()
        __UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        __UpperCAmelCase = 'lower newer'
        __UpperCAmelCase = processor(text=__A , return_tensors='np' )
        __UpperCAmelCase = tokenizer(__A , return_tensors='np' )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )

    # Text + images together yields ids, mask and pixel values; no input raises.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = self.get_image_processor()
        __UpperCAmelCase = self.get_tokenizer()
        __UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        __UpperCAmelCase = 'lower newer'
        __UpperCAmelCase = self.prepare_image_inputs()
        __UpperCAmelCase = processor(text=__A , images=__A )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    # Flat list of query strings: padded to the fixed sequence length.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = 'google/owlvit-base-patch32'
        __UpperCAmelCase = OwlViTProcessor.from_pretrained(__A )
        __UpperCAmelCase = ['cat', 'nasa badge']
        __UpperCAmelCase = processor(text=__A )
        __UpperCAmelCase = 16
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
        self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    # Nested per-image query lists: batch flattened to batch_size * max queries.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = 'google/owlvit-base-patch32'
        __UpperCAmelCase = OwlViTProcessor.from_pretrained(__A )
        __UpperCAmelCase = [['cat', 'nasa badge'], ['person']]
        __UpperCAmelCase = processor(text=__A )
        __UpperCAmelCase = 16
        __UpperCAmelCase = len(__A )
        __UpperCAmelCase = max([len(__A ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
        self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    # Token ids for known queries match the expected hard-coded sequences.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = 'google/owlvit-base-patch32'
        __UpperCAmelCase = OwlViTProcessor.from_pretrained(__A )
        __UpperCAmelCase = ['cat', 'nasa badge']
        __UpperCAmelCase = processor(text=__A )
        __UpperCAmelCase = 16
        __UpperCAmelCase = inputs['input_ids']
        __UpperCAmelCase = [
            [49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
        self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )

    # Image-guided queries: images + query_images produce both pixel tensors.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = self.get_image_processor()
        __UpperCAmelCase = self.get_tokenizer()
        __UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        __UpperCAmelCase = self.prepare_image_inputs()
        __UpperCAmelCase = self.prepare_image_inputs()
        __UpperCAmelCase = processor(images=__A , query_images=__A )
        self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    # batch_decode should delegate straight to the tokenizer.
    def __lowerCamelCase ( self ):
        __UpperCAmelCase = self.get_image_processor()
        __UpperCAmelCase = self.get_tokenizer()
        __UpperCAmelCase = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        __UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __UpperCAmelCase = processor.batch_decode(__A )
        __UpperCAmelCase = tokenizer.batch_decode(__A )
        self.assertListEqual(__A , __A )
| 126
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class A ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = [0.48_145_466, 0.4_578_275, 0.40_821_073] , SCREAMING_SNAKE_CASE = [0.26_862_954, 0.26_130_258, 0.27_577_711] , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=3 , ) -> Any:
"""simple docstring"""
A : int = parent
A : Dict = do_resize
A : int = size if size is not None else {'''shortest_edge''': 288}
A : Dict = size_divisor
A : Optional[int] = do_rescale
A : str = rescale_factor
A : int = do_normalize
A : Any = do_center_crop
A : Union[str, Any] = image_mean
A : Any = image_std
A : int = do_pad
A : List[str] = batch_size
A : Any = num_channels
A : Union[str, Any] = min_resolution
A : Optional[Any] = max_resolution
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
if not batched:
A : Optional[int] = self.size['''shortest_edge''']
A : str = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE , Image.Image ):
A, A : Tuple = image.size
else:
A, A : Optional[int] = image.shape[1], image.shape[2]
A : List[Any] = size / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if h < w:
A, A : Optional[Any] = size, scale * w
else:
A, A : Tuple = scale * h, size
A : Any = int((1333 / 800) * size )
if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > max_size:
A : Tuple = max_size / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : int = newh * scale
A : List[str] = neww * scale
A, A : Optional[Any] = int(newh + 0.5 ), int(neww + 0.5 )
A, A : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
A : Any = []
for image in image_inputs:
A, A : Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : Dict = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0]
A : List[str] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A ( __snake_case , unittest.TestCase ):
    """Image-processing tests for BridgeTowerImageProcessor over PIL, numpy and torch inputs.

    NOTE(review): the class attribute was presumably ``image_processing_class`` (the
    mixin/methods read that name) but is bound to ``__magic_name__``; method bodies
    bind results to the throwaway local ``A`` while later lines read
    ``image_processing`` / ``image_inputs`` / ``encoded_images`` /
    ``self.image_processor_tester`` — those bindings appear lost, and every method
    shares the single name ``__lowerCAmelCase`` (the original ``setUp`` /
    ``image_processor_dict`` property / ``test_*`` names are lost).  The base class
    ``__snake_case`` is also undefined in this file.  Confirm against the upstream
    test file before running.
    """

    __magic_name__ = BridgeTowerImageProcessor if is_vision_available() else None

    # setUp-style fixture: build the configuration helper.
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : Tuple = BridgeTowerImageProcessingTester(self )

    # Property returning the processor kwargs from the helper.
    @property
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor must expose all configuration attributes.
    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        A : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''image_mean''' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''image_std''' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''do_resize''' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''size''' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''size_divisor''' ) )

    # Intentionally skipped test slot.
    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        pass

    # PIL-image inputs: single and batched shapes match the expected values.
    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : int = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
        # Test not batched input
        A : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        A, A : Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        A : List[str] = image_processing(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        A, A : List[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # numpy-array inputs: single and batched shapes match the expected values.
    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        A : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
        # Test not batched input
        A : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        A, A : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        A : str = image_processing(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        A, A : Dict = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # torch-tensor inputs: single and batched shapes match the expected values.
    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
        # Test not batched input
        A : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        A, A : str = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        A : Tuple = image_processing(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
        A, A : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 343
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger for the run_mae example script.
lowercase : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
# Fail fast when the installed datasets package is older than the examples require.
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class A :
    """Arguments controlling which data the MAE pre-training example trains/evaluates on.

    NOTE(review): every attribute in the original was declared as the unannotated name
    ``__magic_name__`` (so only the last binding survived and ``@dataclass`` saw no
    fields) with the undefined default ``__snake_case``; field names were
    reconstructed from the ``self.train_dir`` / ``self.validation_dir`` /
    ``self.data_files`` reads in the method below and ``None`` substituted for the
    undefined default — confirm against the example script's CLI.
    """

    # Name of a dataset from the datasets package.
    dataset_name: Optional[str] = field(
        default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    image_column_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The column name of the images in the files.'''} )
    train_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the training data.'''} )
    validation_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the validation data.'''} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def __post_init__( self ) -> None:
        """Collect the explicit train/validation folders into ``self.data_files`` (or None)."""
        data_files = {}
        if self.train_dir is not None:
            data_files['''train'''] = self.train_dir
        if self.validation_dir is not None:
            data_files['''validation'''] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class A :
    """Arguments specifying which model / config / image processor to pre-train with MAE.

    NOTE(review): fields in the original were declared as the unannotated name
    ``__magic_name__`` with the undefined default ``__snake_case``; field names and
    the substituted defaults (None / False / True) were reconstructed from the
    metadata help strings — confirm against the example script's CLI.
    """

    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    image_processor_name: Optional[str] = field(default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
    norm_pix_loss: bool = field(
        default=True , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class A ( __snake_case ):
    """Training arguments extended with a base learning rate for MAE pre-training.

    NOTE(review): the base class name ``__snake_case`` is undefined in this file
    (presumably ``TrainingArguments``) and ``__magic_name__`` is an unannotated class
    attribute (``@dataclass`` will not treat it as a field) — presumably originally
    ``base_learning_rate: float``; confirm against the example script.
    """
    __magic_name__ = field(
        default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def lowerCAmelCase_ ( snake_case__ ):
    """Collate a list of examples into a batch dict by stacking their ``pixel_values``.

    NOTE(review): the original stacked into a throwaway local and then returned the
    undefined name ``pixel_values``; fixed to bind and return the stacked tensor.
    """
    pixel_values = torch.stack([example['''pixel_values'''] for example in snake_case__] )
    return {"pixel_values": pixel_values}
def lowerCAmelCase_ ( ):
    """Entry point: pre-train a ViT-MAE model.

    Parses model/data/training arguments, configures logging and checkpoint
    resumption, loads the image dataset (splitting off a validation set when
    needed), builds the ViTMAE config/image processor/model, applies the MAE
    augmentation pipeline, then trains/evaluates with the HF Trainer and
    optionally pushes to the Hub.

    NOTE(review): throughout this function results are bound to the single
    throwaway name ``A`` (and tuple-unpacked to ``A, A, A``) while later code
    reads names such as ``parser``, ``model_args``, ``training_args``, ``ds``,
    ``config``, ``image_processor``, ``model``, ``trainer`` that are never
    assigned -- mechanical renaming damage; the original presumably bound
    those descriptive names. Verify before relying on this function.
    """
    A : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        A, A, A : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        A, A, A : Any = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mae''' , snake_case__ , snake_case__ )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    A : Optional[int] = training_args.get_process_log_level()
    logger.setLevel(snake_case__ )
    transformers.utils.logging.set_verbosity(snake_case__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(F'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    A : str = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        A : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Initialize our dataset.
    A : Any = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    A : Optional[Any] = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
        A : Tuple = ds['''train'''].train_test_split(data_args.train_val_split )
        A : str = split['''train''']
        A : List[str] = split['''test''']
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    A : str = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        A : List[Any] = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case__ )
    elif model_args.model_name_or_path:
        A : Tuple = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ )
    else:
        A : Any = ViTMAEConfig()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
        if model_args.config_overrides is not None:
            logger.info(F'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'New config: {config}' )
    # adapt config
    config.update(
        {
            '''mask_ratio''': model_args.mask_ratio,
            '''norm_pix_loss''': model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        A : List[str] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case__ )
    elif model_args.model_name_or_path:
        A : List[str] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case__ )
    else:
        A : Optional[int] = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        A : int = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        A : Union[str, Any] = ViTMAEForPreTraining(snake_case__ )
    if training_args.do_train:
        A : int = ds['''train'''].column_names
    else:
        A : Tuple = ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        A : Optional[int] = data_args.image_column_name
    elif "image" in column_names:
        A : List[Any] = '''image'''
    elif "img" in column_names:
        A : Any = '''img'''
    else:
        A : Optional[Any] = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        A : str = image_processor.size['''shortest_edge''']
    else:
        A : List[Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
    A : List[Any] = Compose(
        [
            Lambda(lambda snake_case__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(snake_case__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    def preprocess_images(snake_case__ ):
        # NOTE(review): should presumably assign the transformed list to
        # examples["pixel_values"] and iterate passing each ``image`` to
        # ``transforms`` -- as written it binds a throwaway local and calls
        # ``transforms`` on an undefined name. Verify against the original.
        A : str = [transforms(snake_case__ ) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            A : Optional[int] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(snake_case__ )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            A : List[str] = (
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(snake_case__ )
    # Compute absolute learning rate
    A : Tuple = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        A : List[Any] = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    A : List[Any] = Trainer(
        model=snake_case__ , args=snake_case__ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , )
    # Training
    if training_args.do_train:
        A : List[str] = None
        if training_args.resume_from_checkpoint is not None:
            A : List[Any] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            A : Any = last_checkpoint
        A : List[Any] = trainer.train(resume_from_checkpoint=snake_case__ )
        trainer.save_model()
        trainer.log_metrics('''train''' , train_result.metrics )
        trainer.save_metrics('''train''' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        A : Optional[int] = trainer.evaluate()
        trainer.log_metrics('''eval''' , snake_case__ )
        trainer.save_metrics('''eval''' , snake_case__ )
    # Write model card and (optionally) push to hub
    A : Tuple = {
        '''tasks''': '''masked-auto-encoding''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-auto-encoding'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**snake_case__ )
    else:
        trainer.create_model_card(**snake_case__ )
def lowerCAmelCase_ ( snake_case__ ):
    """TPU multiprocessing entry point (xla_spawn passes a process index).

    The index argument is intentionally ignored. NOTE(review): ``main`` is not
    defined in this module (the entry point above was renamed to
    ``lowerCAmelCase_``), so as written this call raises NameError -- confirm
    the intended target.
    """
    main()
if __name__ == "__main__":
    # Script entry point. NOTE(review): ``main`` is undefined in this module
    # (the entry point is named ``lowerCAmelCase_`` here) -- confirm.
    main()
| 343
| 1
|
'''simple docstring'''
import functools
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : str )-> str:
'''simple docstring'''
__snake_case = len(__snake_case )
__snake_case = len(__snake_case )
@functools.cache
def min_distance(_lowerCamelCase : int , _lowerCamelCase : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
__snake_case = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 24
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowercase__ :
    """Test helper that builds small XGLM configs and dummy inputs.

    NOTE(review): this class shows heavy renaming damage -- the three class
    attributes all share the name ``__UpperCAmelCase`` (originally presumably
    config_cls / config_updates / hidden_act, so only the last survives), the
    ``__init__`` gives every positional parameter the same name
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and binds each value to the local
    ``_lowerCamelCase`` instead of setting ``self.batch_size`` etc., and the
    annotated tuple-unpack in the last method is invalid syntax. Attributes
    read elsewhere (``self.batch_size``, ``self.vocab_size``, ...) are never
    initialised -- verify against the original test file.
    """

    __UpperCAmelCase = XGLMConfig
    __UpperCAmelCase = {}
    __UpperCAmelCase = '''gelu'''

    # Intended: store the constructor hyper-parameters on ``self``.
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]:
        _lowerCamelCase : Optional[int] = parent
        _lowerCamelCase : int = batch_size
        _lowerCamelCase : str = seq_length
        _lowerCamelCase : Any = is_training
        _lowerCamelCase : int = use_input_mask
        _lowerCamelCase : Union[str, Any] = use_labels
        _lowerCamelCase : str = vocab_size
        _lowerCamelCase : List[str] = d_model
        _lowerCamelCase : List[Any] = num_hidden_layers
        _lowerCamelCase : Dict = num_attention_heads
        _lowerCamelCase : int = ffn_dim
        _lowerCamelCase : str = activation_function
        _lowerCamelCase : Optional[int] = activation_dropout
        _lowerCamelCase : Tuple = attention_dropout
        _lowerCamelCase : Tuple = max_position_embeddings
        _lowerCamelCase : Dict = initializer_range
        _lowerCamelCase : Optional[Any] = None
        _lowerCamelCase : Union[str, Any] = 0
        _lowerCamelCase : List[Any] = 2
        _lowerCamelCase : str = 1

    # Intended: get_large_model_config -- the real 564M-parameter config.
    def UpperCamelCase_ ( self) -> int:
        return XGLMConfig.from_pretrained("""facebook/xglm-564M""")

    # Intended: prepare_config_and_inputs -- dummy ids / mask / head mask.
    def UpperCamelCase_ ( self) -> int:
        _lowerCamelCase : Union[str, Any] = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3)
        _lowerCamelCase : str = None
        if self.use_input_mask:
            _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
        _lowerCamelCase : Tuple = self.get_config()
        _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    # Intended: get_config -- a tiny config for fast tests.
    # NOTE(review): reads ``self.hidden_size``, which no code path sets.
    def UpperCamelCase_ ( self) -> Optional[int]:
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , )

    # Intended: prepare_config_and_inputs_for_common.
    def UpperCamelCase_ ( self) -> Optional[int]:
        _lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : str = config_and_inputs
        _lowerCamelCase : Optional[Any] = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_tf
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
    """Common model/pipeline test suite for TF XGLM.

    NOTE(review): the bases ``A_`` are undefined names (originally presumably
    TFModelTesterMixin and PipelineTesterMixin, which this file imports); the
    class attributes all share the name ``__UpperCAmelCase`` so only the last
    survives; ``TFXGLMModelTester`` is undefined (the tester class above is
    named ``lowercase__``); and ``SCREAMING_SNAKE_CASE`` is an undefined
    reference inside the first method -- renaming damage, verify.
    """

    __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
    __UpperCAmelCase = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False

    # Intended: setUp -- build the tester and config tester.
    def UpperCamelCase_ ( self) -> Optional[Any]:
        _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self)
        _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37)

    # Intended: test_config.
    def UpperCamelCase_ ( self) -> Dict:
        self.config_tester.run_common_tests()

    # Intended: test_model_from_pretrained -- smoke-load the released checkpoint.
    @slow
    def UpperCamelCase_ ( self) -> List[Any]:
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE)

    @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""")
    def UpperCamelCase_ ( self) -> List[Any]:
        super().test_resize_token_embeddings()
@require_tf
class lowercase__ ( unittest.TestCase ):
    """Slow integration tests for TF XGLM text generation.

    NOTE(review): renaming damage throughout -- every local is bound to
    ``_lowerCamelCase`` while later lines read descriptive names
    (``model``, ``tokenizer``, ``inputs``, ``output_ids``, ...) that are never
    assigned; ``SCREAMING_SNAKE_CASE`` is used both as a parameter default and
    as an undefined positional argument; ``tf.intaa`` is presumably a mangled
    ``tf.int32``. Verify against the original test file.
    """

    # Intended: test_lm_generate_xglm -- greedy generation matches fixed ids.
    @slow
    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]:
        _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
        # fmt: on
        _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE)

    # Intended: test_xglm_sample -- seeded sampling reproduces a fixed string.
    @slow
    def UpperCamelCase_ ( self) -> int:
        _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
        _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        tf.random.set_seed(0)
        _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""")
        _lowerCamelCase : Any = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(""":/CPU:0"""):
            _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0])
        _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Any = (
            """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
        )
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)

    # Intended: test_batch_generation -- left-padded batched generation matches
    # generating each sentence individually.
    @slow
    def UpperCamelCase_ ( self) -> List[Any]:
        _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
        _lowerCamelCase : List[Any] = """left"""
        # use different length sentences to test batching
        _lowerCamelCase : List[Any] = [
            """This is an extremelly long sentence that only exists to test the ability of the model to cope with """
            """left-padding, such as in batched generation. The output for the sequence below should be the same """
            """regardless of whether left padding is applied or not. When""",
            """Hello, my dog is a little""",
        ]
        _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : int = inputs["""input_ids"""]
        _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12)
        _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids
        _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12)
        _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids
        _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12)
        _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Tuple = [
            """This is an extremelly long sentence that only exists to test the ability of the model to cope with """
            """left-padding, such as in batched generation. The output for the sequence below should be the same """
            """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
            """a single""",
            """Hello, my dog is a little bit of a shy one, but he is very friendly""",
        ]
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
        self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
| 88
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the RAG sub-package: submodule contents are only
# resolved on first attribute access via _LazyModule, keeping import cheap.
#
# Fixes the original, which rebound the single name ``__snake_case`` for the
# import structure AND each conditional model list (each assignment clobbering
# the previous one), referenced the never-defined ``_import_structure`` in the
# final call, and bound the _LazyModule proxy to a throwaway name instead of
# installing it in ``sys.modules``.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch models are only exposed when torch is installed.
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow models are only exposed when TF is installed.
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type checkers see the eager imports.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module object with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowercase__ ( PretrainedConfig ):
    """Configuration for a Perceiver model.

    Stores the core architecture hyper-parameters plus the extra attributes
    used by the masked-LM, image-classification, optical-flow and multimodal
    autoencoding heads.

    Fixes the original definition, whose ``__init__`` gave every positional
    parameter the same name (``UpperCAmelCase_`` -- a SyntaxError), bound each
    value to a throwaway local instead of setting it on ``self``, and
    inherited from the undefined name ``_UpperCAmelCase`` (``PretrainedConfig``
    is what this file imports).
    """

    # Consumed by the PretrainedConfig registry machinery (was bound to the
    # meaningless name ``A__`` by the renaming damage).
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],  # NOTE: list defaults kept for interface fidelity with upstream
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class lowercase__ ( OnnxConfig ):
    """ONNX export configuration for Perceiver models.

    Fixes the original definition: it inherited from the undefined name
    ``_UpperCAmelCase`` (this file imports ``OnnxConfig``), named all three
    members ``A_`` (so the last silently shadowed the two properties), gave
    every parameter of the dummy-input generator the same name (a
    SyntaxError), and rebound each intermediate result to a throwaway local.
    """

    @property
    def inputs(self):
        """Axis layout of the exported graph's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # Perceiver's forward consumes a generic "inputs" tensor, not input_ids.
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
    ):
        """Build dummy text or image inputs for tracing/export.

        ``preprocessor`` may be a tokenizer (text models) or a feature
        extractor/image processor (vision models); anything else raises
        ValueError.
        """
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # Rename to the key Perceiver's forward expects.
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 400
| 0
|
"""simple docstring"""
def __magic_name__ ( lowercase = 100 ):
SCREAMING_SNAKE_CASE_: List[str] =(n * (n + 1) // 2) ** 2
SCREAMING_SNAKE_CASE_: Optional[int] =n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 409
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_UpperCAmelCase = get_tests_dir("""fixtures""")
class a ( unittest.TestCase ):
    """Offline/robustness tests for image-processor loading.

    NOTE(review): all three methods share the name ``lowerCamelCase__``, so
    only the last one survives on the class; the mock configuration in the
    first method was corrupted into plain local rebindings (originally
    presumably ``response_mock.status_code = 500`` etc.); and ``lowerCAmelCase``
    is an undefined reference throughout. Verify against the original test
    file.
    """

    # Intended: test_cached_files_are_used_when_internet_is_down.
    def lowerCamelCase__ ( self : Any ) -> Any:
        """A cached processor should load even when the Hub returns HTTP 500."""
        SCREAMING_SNAKE_CASE_: Optional[int] =mock.Mock()
        # NOTE(review): the next four lines look like corrupted attribute
        # assignments on the mock (status_code / headers /
        # raise_for_status.side_effect / json.return_value) -- confirm.
        SCREAMING_SNAKE_CASE_: Tuple =500
        SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
        SCREAMING_SNAKE_CASE_: str =HTTPError
        SCREAMING_SNAKE_CASE_: Any ={}
        # Download this model to make sure it's in the cache.
        SCREAMING_SNAKE_CASE_: Union[str, Any] =ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=lowerCAmelCase ) as mock_head:
            SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
            # This check we did call the fake head request
            mock_head.assert_called()

    # Intended: test_image_processor_from_pretrained_subfolder / URL loading.
    def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
        """Loading directly from a config URL should succeed."""
        SCREAMING_SNAKE_CASE_: int =ViTImageProcessor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )

    def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
        """Loading from a subfolder requires the ``subfolder`` argument."""
        with self.assertRaises(lowerCAmelCase ):
            # config is in subfolder, the following should not work without specifying the subfolder
            SCREAMING_SNAKE_CASE_: int =AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
        SCREAMING_SNAKE_CASE_: Optional[int] =AutoImageProcessor.from_pretrained(
            """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
        self.assertIsNotNone(lowerCAmelCase )
@is_staging_test
class a ( unittest.TestCase ):
    """Staging-Hub integration tests for pushing image processors.

    NOTE(review): all test methods share the name ``lowerCamelCase__`` (only
    the last survives on the class), and several calls pass the undefined name
    ``lowerCAmelCase`` where the original presumably passed a local sample
    config directory, ``k``/``v`` from the comparison loop, or boolean flags.
    Verify against the original test file.
    """

    # Intended: setUpClass -- authenticate against the staging Hub.
    @classmethod
    def lowerCamelCase__ ( cls : List[str] ) -> List[str]:
        """Save the test token so pushes are authenticated."""
        SCREAMING_SNAKE_CASE_: Optional[int] =TOKEN
        HfFolder.save_token(lowerCAmelCase )

    # Intended: tearDownClass -- best-effort cleanup of repos created below.
    @classmethod
    def lowerCamelCase__ ( cls : Dict ) -> Optional[Any]:
        """Delete any repos left over from the tests (ignore missing)."""
        try:
            delete_repo(token=cls._token , repo_id="""test-image-processor""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
        except HTTPError:
            pass

    # Intended: test_push_to_hub (user namespace).
    def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
        """Push via push_to_hub and save_pretrained; reload and compare."""
        SCREAMING_SNAKE_CASE_: Dict =ViTImageProcessor.from_pretrained(lowerCAmelCase )
        image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-image-processor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                lowerCAmelCase , repo_id="""test-image-processor""" , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )

    # Intended: test_push_to_hub_in_organization.
    def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
        """Same as above, but pushing into an organization namespace."""
        SCREAMING_SNAKE_CASE_: Optional[int] =ViTImageProcessor.from_pretrained(lowerCAmelCase )
        image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_: Optional[int] =ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                lowerCAmelCase , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_: Dict =ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )

    # Intended: test_push_to_hub_dynamic_image_processor -- custom class with
    # auto_map, reloaded with trust_remote_code.
    def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
        """Push a custom image-processor class and reload it dynamically."""
        CustomImageProcessor.register_for_auto_class()
        SCREAMING_SNAKE_CASE_: Any =CustomImageProcessor.from_pretrained(lowerCAmelCase )
        image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
        SCREAMING_SNAKE_CASE_: List[str] =AutoImageProcessor.from_pretrained(
            f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=lowerCAmelCase )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 409
| 1
|
from PIL import Image
def _UpperCamelCase (a__ :Image , a__ :int ):
"""simple docstring"""
UpperCamelCase__ = (259 * (level + 255)) / (255 * (259 - level))
def contrast(a__ :int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(a__ )
if __name__ == "__main__":
    # Demo: load an image, raise its contrast, and save the result.
    # NOTE(review): ``change_contrast`` is not defined in this module (the
    # function above is named ``_UpperCamelCase``), and the result is bound to
    # ``UpperCamelCase__`` while ``cont_img`` is read below -- renaming
    # damage; confirm the intended names.
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        UpperCamelCase__ = change_contrast(img, 170)
    cont_img.save("image_data/lena_high_contrast.png", format="png")
| 706
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    # Fixes: argparse stores "--txt2img_unclip" as ``args.txt2img_unclip`` (the
    # original read a misspelled attribute), and every local below was unbound.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Rebuild an image-variation pipeline from the txt2img components.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 548
| 0
|
def UpperCamelCase__(arr: list[int], required_sum: int) -> bool:
    """Return True iff some subset of *arr* sums to *required_sum*.

    Classic O(len(arr) * required_sum) dynamic program.
    Fixes: the original declared two parameters with the same name (a
    SyntaxError) and read locals (`arr_len`, `subset`) that were never bound.
    """
    arr_len = len(arr)
    # subset[i][j]: can a subset of the first i elements sum to j?
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 287
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class a_ ( unittest.TestCase ):
    """Fixture holding the hyper-parameters used by the Vivit image-processor tests.

    Fixes: the original ``__init__`` declared every parameter under the same
    name ``__UpperCamelCase`` (a SyntaxError) and never bound the ``self.*``
    attributes it later exposes.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # NOTE(review): mutable defaults kept for interface fidelity; read-only here
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        # Fall back to the defaults the tests assert against.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def UpperCamelCase_ ( self ):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class a_ ( _a , unittest.TestCase ):
    """Vivit image-processor test suite (PIL, numpy and torch input paths).

    NOTE(review): obfuscation collapsed local names onto ``_lowercase`` —
    ``image_processor``, ``image_processing``, ``video_inputs`` and
    ``encoded_videos`` below are read but never bound, and
    ``VivitImageProcessingTester`` no longer exists under that name (the
    tester class above was renamed ``a_``). Confirm against the upstream
    transformers test module before relying on this file.
    """

    # Processor class under test; None when vision deps are unavailable.
    a : str = VivitImageProcessor if is_vision_available() else None
    def UpperCamelCase_ ( self ):
        """Create the shared tester fixture."""
        _lowercase = VivitImageProcessingTester(self )
    @property
    def UpperCamelCase_ ( self ):
        """Kwargs dict used to build the processor in every test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase_ ( self ):
        """Processor exposes the expected configuration attributes."""
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """do_center_crop""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
    def UpperCamelCase_ ( self ):
        """from_dict honours defaults and keyword overrides for size/crop_size."""
        _lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        _lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
    def UpperCamelCase_ ( self ):
        """PIL video inputs: single video and batch produce the expected shapes."""
        # Initialize image_processing
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        _lowercase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
        for video in video_inputs:
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        _lowercase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _lowercase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def UpperCamelCase_ ( self ):
        """numpy video inputs: same shape checks as the PIL path."""
        # Initialize image_processing
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowercase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
        for video in video_inputs:
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        _lowercase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _lowercase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def UpperCamelCase_ ( self ):
        """torch video inputs: same shape checks as the PIL path."""
        # Initialize image_processing
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowercase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
        for video in video_inputs:
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        _lowercase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        _lowercase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 287
| 1
|
import re
def _a(UpperCamelCase_: str) -> list:
    """Split a string into lists of words, breaking on any non-alphanumeric separator.

    e.g. "one.two three" -> [["one"], ["two", "three"]]
    Fixes: the original split an undefined name ``str_`` instead of its parameter.
    """
    return [chunk.split() for chunk in re.split(R"[^ a-z A-Z 0-9 \s]", UpperCamelCase_)]
def _a(UpperCamelCase_: str) -> str:
    """Return the CapitalizedWords (simple/Pascal) form of the input string.

    Fixes: the original called ``split_input``, which does not exist in this
    module (the splitter was renamed by obfuscation); the split is inlined here.
    """
    string_split = [chunk.split() for chunk in re.split(R"[^ a-z A-Z 0-9 \s]", UpperCamelCase_)]
    return "".join(
        ["".join([word.capitalize() for word in sub_str]) for sub_str in string_split]
    )
def _a(text: str, upper: bool, separator: str) -> str:
    """Join the words of *text* with *separator*, upper- or lower-cased.

    Args:
        text: the string to convert.
        upper: True for upper-case words, False for lower-case.
        separator: joiner between words (e.g. "_" for snake, "-" for kebab).

    Returns:
        The converted string, or "not valid string" on an IndexError.

    Fixes: the original declared all three parameters under the same name
    (a SyntaxError) and called an undefined ``split_input`` (inlined here).
    """
    try:
        string_split = [chunk.split() for chunk in re.split(R"[^ a-z A-Z 0-9 \s]", text)]
        if upper:
            res_str = "".join(
                [
                    separator.join([word.upper() for word in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([word.lower() for word in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"
def _a ( UpperCamelCase_ : str ) -> str:
    """Return the PascalCase form of the input string.

    NOTE(review): ``to_simple_case`` is not defined in this module as written —
    obfuscation renamed the CapitalizedWords helper above to ``_a``; confirm
    the wiring against the upstream source before use.
    """
    return to_simple_case(UpperCamelCase_ )
def _a ( UpperCamelCase_ : str ) -> str:
    """Return the camelCase form of the input string ("not valid string" if empty).

    NOTE(review): ``to_simple_case`` is undefined here, and the assignment binds
    ``lowerCAmelCase__`` while the next line reads ``res_str`` — obfuscation has
    broken the local names; confirm against the upstream source.
    """
    try:
        lowerCAmelCase__ = to_simple_case(UpperCamelCase_ )
        # Lower-case only the leading character of the Pascal form.
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def _a(text: str, upper: bool) -> str:
    """snake_case conversion: delegate to the generic joiner with "_".

    Fixes: the original declared both parameters under the same name (SyntaxError).
    NOTE(review): ``to_complex_case`` was renamed ``_a`` by obfuscation — confirm.
    """
    return to_complex_case(text, upper, "_")
def _a(text: str, upper: bool) -> str:
    """kebab-case conversion: delegate to the generic joiner with "-".

    Fixes: the original declared both parameters under the same name (SyntaxError).
    NOTE(review): ``to_complex_case`` was renamed ``_a`` by obfuscation — confirm.
    """
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
    # Execute the module doctests when run directly.
    import doctest

    doctest.testmod()
| 719
|
def _a(point_a: list, point_b: list) -> float:
    """Manhattan (L1) distance between two points of equal dimension.

    Raises:
        ValueError: if the points have different dimensionality.

    Fixes: the original declared both parameters under the same name (SyntaxError).
    NOTE(review): ``_validate_point`` is undefined here — the validator below was
    renamed ``_a`` by obfuscation; confirm the wiring.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _a(UpperCamelCase_: list[float]) -> None:
    """Validate that the argument is a non-empty list of numbers.

    Raises:
        ValueError: if the input is falsy (empty list / None).
        TypeError: if the input is not a list, or contains a non-number.

    Fixes: the original read undefined names (``point``, ``item``) and called
    ``isinstance(x, x)`` with a non-type second argument.
    """
    if UpperCamelCase_:
        if isinstance(UpperCamelCase_, list):
            for item in UpperCamelCase_:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(UpperCamelCase_).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def _a(point_a: list, point_b: list) -> float:
    """Manhattan distance, one-liner variant (same contract as the version above).

    Fixes: the original declared both parameters under the same name (SyntaxError).
    NOTE(review): ``_validate_point`` is undefined here — obfuscation-renamed helper.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 115
| 0
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def __lowercase(_a):
    """Convert a test-file path (tests/models/x/test_modeling_x.py) to a module path.

    Raises:
        ValueError: if the path is not under tests/models/ or is not a
        ``test_modeling_*.py`` python file.

    Fixes: the original bound every local to ``snake_case_`` and then read
    ``components`` / ``test_fn``, which were never defined.
    """
    components = _a.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{_a} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def __lowercase ( _a ):
    """Import and return the test module for a given test-file path.

    NOTE(review): ``get_module_path`` is undefined here (the helper above was
    renamed ``__lowercase``), and the import uses the raw argument rather than
    the computed module path — obfuscation damage; confirm upstream.
    """
    snake_case_ : Tuple = get_module_path(_a )
    snake_case_ : List[str] = importlib.import_module(_a )
    return test_module
def __lowercase(_a):
    """Collect all ``*ModelTester`` classes defined in the given test module path.

    Fixes: unbound locals and a broken sort key (``lambda _a: x.__name__``
    referenced an undefined ``x``).
    NOTE(review): ``get_test_module`` is undefined here — obfuscation renamed
    the loader above ``__lowercase``; confirm the wiring.
    """
    tester_classes = []
    test_module = get_test_module(_a)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda cls: cls.__name__)
def __lowercase(_a):
    """Collect test classes (those with a non-empty ``all_model_classes``) in a module.

    Fixes: unbound locals and the broken ``lambda _a: x.__name__`` sort key.
    NOTE(review): ``get_test_module`` is undefined here — obfuscation-renamed helper.
    """
    test_classes = []
    test_module = get_test_module(_a)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda cls: cls.__name__)
def __lowercase(_a):
    """Union of all model classes covered by the test classes of a module.

    Fixes: unbound locals and the broken ``lambda _a: x.__name__`` sort key.
    NOTE(review): ``get_test_classes`` is undefined here — obfuscation-renamed helper.
    """
    test_classes = get_test_classes(_a)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda cls: cls.__name__)
def __lowercase(_a):
    """Instantiate a test class and return the class of its ``model_tester`` (or None).

    Runs ``setUp`` if present so the tester is populated.
    Fixes: the original never bound the instance (``test``) or the result
    (``model_tester``) it subsequently read.
    """
    test = _a()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def __lowercase(test_file, model_class):
    """Test classes in *test_file* whose ``all_model_classes`` include *model_class*.

    Fixes: both parameters shared the name ``_a`` (a SyntaxError), locals were
    unbound, and the sort key referenced an undefined ``x``.
    NOTE(review): ``get_test_classes`` is undefined here — obfuscation-renamed helper.
    """
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda cls: cls.__name__)
def __lowercase(test_file, model_class):
    """Tester classes attached to the test classes covering *model_class*.

    Fixes: duplicate ``_a`` parameters (SyntaxError), unbound locals, and the
    broken ``lambda _a: x.__name__`` sort key.
    NOTE(review): ``get_test_classes_for_model`` / ``get_model_tester_from_test_class``
    are undefined under these names here — obfuscation-renamed helpers; confirm.
    """
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda cls: cls.__name__)
def __lowercase ( _a ):
    """Map each test class of a module to its model-tester class.

    NOTE(review): ``get_test_classes`` is undefined here, and the locals bound
    to ``snake_case_`` are read back as ``test_classes`` / ``test_tester_mapping``
    — obfuscation damage; confirm against the upstream source.
    """
    snake_case_ : Tuple = get_test_classes(_a )
    snake_case_ : str = {test_class: get_model_tester_from_test_class(_a ) for test_class in test_classes}
    return test_tester_mapping
def __lowercase ( _a ):
    """Map each model class of a module to the test classes that cover it.

    NOTE(review): ``get_model_classes`` / ``get_test_classes_for_model`` are
    undefined under these names, and the ``snake_case_`` bindings do not match
    the names read back — obfuscation damage; confirm upstream.
    """
    snake_case_ : Optional[int] = get_model_classes(_a )
    snake_case_ : Optional[int] = {
        model_class: get_test_classes_for_model(_a , _a ) for model_class in model_classes
    }
    return model_test_mapping
def __lowercase ( _a ):
    """Map each model class of a module to its tester classes.

    NOTE(review): ``get_model_classes`` / ``get_tester_classes_for_model`` are
    undefined under these names, and the ``snake_case_`` bindings do not match
    the names read back — obfuscation damage; confirm upstream.
    """
    snake_case_ : List[Any] = get_model_classes(_a )
    snake_case_ : List[Any] = {
        model_class: get_tester_classes_for_model(_a , _a ) for model_class in model_classes
    }
    return model_to_tester_mapping
def __lowercase(_a):
    """Make an object JSON-friendly: strings pass through, classes become their
    names, lists/tuples and dicts are converted recursively, anything else is
    returned unchanged.

    Fixes: the original called ``isinstance(o, o)`` (not a type check), looped a
    dummy variable while re-converting the whole argument, and recursed through
    an undefined name ``to_json``.
    """
    if isinstance(_a, str):
        return _a
    elif isinstance(_a, type):
        return _a.__name__
    elif isinstance(_a, (list, tuple)):
        return [__lowercase(x) for x in _a]
    elif isinstance(_a, dict):
        return {__lowercase(k): __lowercase(v) for k, v in _a.items()}
    else:
        return _a
| 123
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# Builds a deliberately tiny FSMT en-ru checkpoint for use in fast tests.
# NOTE(review): obfuscation collapsed every distinct module variable onto
# ``lowercase__`` — ``vocab``, ``vocab_tokens``, ``merges``, ``build_dir``,
# ``src_vocab_file``, ``tgt_vocab_file``, ``merges_file``, ``tokenizer``,
# ``config``, ``tiny_model``, ``batch`` and ``outputs`` below are read but
# never bound under those names. Confirm against the upstream
# fsmt-make-super-tiny-model.py before running.
lowercase__ : Optional[int] = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
lowercase__ : Dict = [
    '''l''',
    '''o''',
    '''w''',
    '''e''',
    '''r''',
    '''s''',
    '''t''',
    '''i''',
    '''d''',
    '''n''',
    '''w</w>''',
    '''r</w>''',
    '''t</w>''',
    '''lo''',
    '''low''',
    '''er</w>''',
    '''low</w>''',
    '''lowest</w>''',
    '''newer</w>''',
    '''wider</w>''',
    '''<unk>''',
]
lowercase__ : List[Any] = dict(zip(vocab, range(len(vocab))))
lowercase__ : Optional[int] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
# Write the tiny vocab/merges files into a scratch directory for the tokenizer.
with tempfile.TemporaryDirectory() as tmpdirname:
    lowercase__ : Optional[Any] = Path(tmpdirname)
    lowercase__ : Any = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    lowercase__ : Tuple = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    lowercase__ : List[Any] = build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))
    lowercase__ : int = FSMTTokenizer(
        langs=['''en''', '''ru'''],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
# Minimal 1-layer, 4-dim model so the saved checkpoint stays tiny.
lowercase__ : Tuple = FSMTConfig(
    langs=['''ru''', '''en'''],
    src_vocab_size=10_00,
    tgt_vocab_size=10_00,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
lowercase__ : Optional[int] = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
lowercase__ : Dict = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
lowercase__ : List[Any] = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Guard the supported fairseq version range for the conversion script below.
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
    raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
    raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
# NOTE(review): three distinct module constants (the logger, a sample text and
# a sample language code, upstream SAMPLE_TEXT / SAMPLE_LANGUAGE) were all
# collapsed onto the single name ``lowerCamelCase`` by obfuscation; later code
# still reads ``SAMPLE_LANGUAGE``. Confirm against the upstream script.
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = """Hello, World!"""
lowerCamelCase = """en_XX"""
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
    """Convert a fairseq X-MOD checkpoint to a HuggingFace Xmod model and save it.

    Loads the fairseq model, copies embeddings, every encoder layer (attention,
    feed-forward, adapters) and the head, then checks both models produce the
    same output before writing the converted weights to disk.

    NOTE(review): obfuscation damage throughout — all three parameters share the
    name ``_lowerCAmelCase`` (a SyntaxError as written), every local is bound to
    ``__lowercase`` while later lines read the upstream names (``xmod``,
    ``config``, ``model``, ``data_dir``, ``classification_head``, ...), and the
    distinct fairseq fields fc1/fc2 were both collapsed to ``fca``. Confirm each
    line against the upstream convert_xmod_original_pytorch_checkpoint_to_pytorch
    script before running.
    """
    __lowercase =Path('data_bin' )
    __lowercase =FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(_lowerCAmelCase ) , bpe='sentencepiece' , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
    xmod.eval() # disable dropout
    print(_lowerCAmelCase )
    __lowercase =xmod.model.encoder.sentence_encoder
    # Mirror the fairseq architecture hyper-parameters into an Xmod config.
    __lowercase =XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        __lowercase =xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:' , _lowerCAmelCase )
    __lowercase =XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    __lowercase =xmod_sent_encoder.embed_tokens.weight
    __lowercase =xmod_sent_encoder.embed_positions.weight
    __lowercase =torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    __lowercase =xmod_sent_encoder.layernorm_embedding.weight
    __lowercase =xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        __lowercase =model.roberta.encoder.layer[i]
        __lowercase =xmod_sent_encoder.layers[i]
        # self attention
        __lowercase =layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError('Dimensions of self-attention weights do not match.' )
        __lowercase =xmod_layer.self_attn.q_proj.weight
        __lowercase =xmod_layer.self_attn.q_proj.bias
        __lowercase =xmod_layer.self_attn.k_proj.weight
        __lowercase =xmod_layer.self_attn.k_proj.bias
        __lowercase =xmod_layer.self_attn.v_proj.weight
        __lowercase =xmod_layer.self_attn.v_proj.bias
        # self-attention output
        __lowercase =layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('Dimensions of self-attention output weights do not match.' )
        __lowercase =xmod_layer.self_attn.out_proj.weight
        __lowercase =xmod_layer.self_attn.out_proj.bias
        __lowercase =xmod_layer.self_attn_layer_norm.weight
        __lowercase =xmod_layer.self_attn_layer_norm.bias
        # intermediate
        __lowercase =layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        __lowercase =xmod_layer.fca.weight
        __lowercase =xmod_layer.fca.bias
        # output
        __lowercase =layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        __lowercase =xmod_layer.fca.weight
        __lowercase =xmod_layer.fca.bias
        __lowercase =xmod_layer.final_layer_norm.weight
        __lowercase =xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            __lowercase =xmod_layer.adapter_layer_norm.weight
            __lowercase =xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('Lists of language adapters do not match.' )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            __lowercase =bert_output.adapter_modules[lang_code]
            __lowercase =xmod_layer.adapter_modules[lang_code]
            __lowercase =from_adapter.fca.weight
            __lowercase =from_adapter.fca.bias
            __lowercase =from_adapter.fca.weight
            __lowercase =from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        __lowercase =xmod_sent_encoder.layer_norm.weight
        __lowercase =xmod_sent_encoder.layer_norm.bias
    if classification_head:
        __lowercase =xmod.model.classification_heads['mnli'].dense.weight
        __lowercase =xmod.model.classification_heads['mnli'].dense.bias
        __lowercase =xmod.model.classification_heads['mnli'].out_proj.weight
        __lowercase =xmod.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        __lowercase =xmod.model.encoder.lm_head.dense.weight
        __lowercase =xmod.model.encoder.lm_head.dense.bias
        __lowercase =xmod.model.encoder.lm_head.layer_norm.weight
        __lowercase =xmod.model.encoder.lm_head.layer_norm.bias
        __lowercase =xmod.model.encoder.lm_head.weight
        __lowercase =xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    __lowercase =xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(_lowerCAmelCase )
    __lowercase =model(_lowerCAmelCase )[0]
    if classification_head:
        __lowercase =xmod.model.classification_heads['mnli'](xmod.extract_features(_lowerCAmelCase ) )
    else:
        __lowercase =xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    __lowercase =torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    __lowercase =torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    # Fixes: the original bound the parser to ``lowerCamelCase`` but used
    # ``parser``/``args``, and called the undefined name
    # ``convert_xmod_checkpoint_to_pytorch`` (renamed ``_A`` by obfuscation).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
    )
    args = parser.parse_args()
    _A(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 454
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
# Help/description text for the `accelerate config` CLI command.
lowerCamelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def _A():
    """Prompt the user interactively and build an accelerate launch config.

    Returns the SageMaker config when the user selects AWS, otherwise the
    cluster (local machine) config.
    Fixes: the original never bound ``compute_environment`` or ``config``,
    which it then read.
    """
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def _A(_lowerCAmelCase=None):
    """Create the `accelerate config` argument parser.

    If *_lowerCAmelCase* (a subparsers action) is given, attach ``config`` as a
    subcommand and register it as the default func; otherwise build a
    standalone parser. Fixes: the original never bound ``parser``.

    NOTE(review): upstream passes the module-level help text as ``description``
    and the command callback to ``set_defaults``; the obfuscated source reuses
    the argument for both — preserved here, confirm against upstream accelerate.
    """
    if _lowerCAmelCase is not None:
        parser = _lowerCAmelCase.add_parser('config' , description=_lowerCAmelCase )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=_lowerCAmelCase )
    parser.add_argument(
        '--config_file' , default=_lowerCAmelCase , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if _lowerCAmelCase is not None:
        parser.set_defaults(func=_lowerCAmelCase )
    return parser
def _A ( _lowerCAmelCase ):
    """Run the interactive config flow and write the result to disk (json or yaml).

    NOTE(review): ``get_user_input`` is undefined under that name here (the
    prompt helper above was renamed ``_A``), and the locals bound to
    ``__lowercase`` are read back as ``config``/``config_file`` — obfuscation
    damage; confirm against upstream accelerate before running.
    """
    __lowercase =get_user_input()
    if args.config_file is not None:
        __lowercase =args.config_file
    else:
        # Ensure the default cache directory exists before writing.
        if not os.path.isdir(_lowerCAmelCase ):
            os.makedirs(_lowerCAmelCase )
        __lowercase =default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(_lowerCAmelCase )
    else:
        config.to_yaml_file(_lowerCAmelCase )
    print(f"""accelerate configuration saved at {config_file}""" )
def _A ( ):
    """Entry point: parse CLI args and run the config command.

    NOTE(review): ``config_command_parser`` / ``config_command`` are undefined
    under these names (both helpers above were renamed ``_A``), and ``parser``
    is never bound — obfuscation damage; confirm against upstream accelerate.
    """
    __lowercase =config_command_parser()
    __lowercase =parser.parse_args()
    config_command(_lowerCAmelCase )
if __name__ == "__main__":
    main()
| 454
| 1
|
from __future__ import annotations
def __lowerCAmelCase(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Solve Ohm's law for whichever quantity is passed as zero.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two and returned as a one-entry dict keyed by its name.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance is negative.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    # Guard clauses above done; solve for the single unknown.
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    if resistance == 0:
        return {"resistance": voltage / current}
    # Unreachable given the count() check, kept to mirror the original contract.
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 354
|
from manim import *
class __a ( SCREAMING_SNAKE_CASE ):
    """Manim scene animating a model skeleton being loaded into CPU memory.

    NOTE(review): the base class was obfuscated to ``SCREAMING_SNAKE_CASE``
    (presumably ``manim.Scene``) and many positional arguments below were
    collapsed to ``snake_case_`` — confirm against the original animation
    script before rendering.
    """
    def UpperCamelCase ( self : Tuple)-> Dict:
        """Build CPU/GPU/Model rectangles, then animate empty-model blocks onto the CPU."""
        # Memory-cell primitives: outer frame plus slightly smaller fill block.
        __lowerCAmelCase =Rectangle(height=0.5 , width=0.5)
        __lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
        __lowerCAmelCase =[mem.copy() for i in range(6)]
        __lowerCAmelCase =[mem.copy() for i in range(6)]
        __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =VGroup(snake_case_ , snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =Text("""CPU""" , font_size=24)
        __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(snake_case_)
        # Single-cell GPU block, aligned left of center.
        __lowerCAmelCase =[mem.copy() for i in range(1)]
        __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =Text("""GPU""" , font_size=24)
        __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
        gpu.align_to(snake_case_ , snake_case_)
        gpu.set_x(gpu.get_x() - 1)
        self.add(snake_case_)
        # Model block on the right-hand side.
        __lowerCAmelCase =[mem.copy() for i in range(6)]
        __lowerCAmelCase =VGroup(*snake_case_).arrange(snake_case_ , buff=0)
        __lowerCAmelCase =Text("""Model""" , font_size=24)
        __lowerCAmelCase =Group(snake_case_ , snake_case_).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(snake_case_ , run_time=1) , Create(snake_case_ , run_time=1) , Create(snake_case_ , run_time=1) , )
        __lowerCAmelCase =MarkupText(
            F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
        __lowerCAmelCase =Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        __lowerCAmelCase =MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0])
        step_a.move_to([2, 2, 0])
        self.play(Write(snake_case_ , run_time=2.5) , Write(snake_case_) , Write(snake_case_))
        self.add(snake_case_)
        # Animate each model cell sliding into place over the CPU cells.
        __lowerCAmelCase =[]
        __lowerCAmelCase =[]
        __lowerCAmelCase =[]
        for i, rect in enumerate(snake_case_):
            __lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(snake_case_ , opacity=0.7)
            cpu_target.move_to(snake_case_)
            cpu_target.generate_target()
            __lowerCAmelCase =0.4_6 / 4
            __lowerCAmelCase =0.4_6 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=snake_case_)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0)
            cpu_targs.append(snake_case_)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(snake_case_))
            second_animations.append(MoveToTarget(snake_case_ , run_time=1.5))
        self.play(*snake_case_)
        self.play(*snake_case_)
        self.wait()
| 354
| 1
|
"""simple docstring"""
def UpperCAmelCase ( a_ ):
    """Return the first *a_* Hamming numbers (numbers of the form 2^i * 3^j * 5^k).

    The list is produced in ascending order starting from 1.
    Raises ValueError when *a_* is smaller than 1.
    (Fix: the obfuscated original assigned every local to one name while the
    body read ``n_element``/``hamming_list``/``i``/``j``/``k``/``index``.)
    """
    n_element = int(a_)
    if n_element < 1:
        my_error = ValueError('a should be a positive number')
        raise my_error
    hamming_list = [1]
    # i/j/k point at the smallest element whose 2x/3x/5x multiple is still unused
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        # the next Hamming number is the smallest candidate multiple
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Interactive demo: prompt for n and print the first n Hamming numbers.
    # (Fix: the original called undefined names `hamming` and `n`; the
    # generator above is named `UpperCAmelCase` and the input is `_A`.)
    _A = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = UpperCAmelCase(int(_A))
    print('-----------------------------------------------------')
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 700
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the obfuscated name `_A` is immediately
# reassigned below, so the logger handle is lost after the next statement.
_A = logging.get_logger(__name__)
# Map of pretrained checkpoint id -> hosted config.json URL.
_A = {
    'caidas/swin2sr-classicalsr-x2-64': (
        'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
    ),
}
class _lowercase ( __UpperCAmelCase ):
    r"""Configuration class for a Swin2SR image super-resolution model.

    Fixes: the original collapsed both class attributes onto one name (losing
    the ``model_type`` and ``attribute_map`` that the ``PretrainedConfig``
    machinery looks up) and repeated a single obfuscated parameter name in
    ``__init__`` (a SyntaxError); the real names are restored from the
    attribute assignments and their positional defaults.
    """

    model_type = 'swin2sr'
    attribute_map = {
        'hidden_size': 'embed_dim',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # derived: one transformer stage per entry in `depths`
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 133
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Fix: the original bound both the logger and the archive map to the same
# obfuscated name, clobbering the `logger` that the config classes below call.
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint id -> hosted config.json URL.
__snake_case = {
    '''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
    """Configuration for the BLIP-2 vision encoder.

    Fixes: ``model_type`` restored (the class attribute name ``PretrainedConfig``
    requires), the duplicated obfuscated parameter name in ``__init__``
    (SyntaxError) replaced by the real names inferred from the attribute
    assignments, and the unbound ``config_dict`` in the loader repaired.
    """

    model_type = """blip_2_vision_model"""

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def snake_case_ (cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision config from a checkpoint, unwrapping a full Blip2Config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class __lowerCamelCase (_a ):
    """Configuration for the BLIP-2 Q-Former.

    Fixes mirror the vision config above: ``model_type`` restored, the
    duplicated obfuscated ``__init__`` parameter name (SyntaxError) replaced by
    the real names, and the unbound ``config_dict`` in the loader repaired.
    """

    model_type = """blip_2_qformer"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # a cross-attention layer is inserted into the Q-Former every N blocks
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def snake_case_ (cls, pretrained_model_name_or_path, **kwargs):
        """Load this Q-Former config from a checkpoint, unwrapping a full Blip2Config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['qformer_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class __lowerCamelCase (_a ):
    """Composite BLIP-2 configuration holding vision, Q-Former and text sub-configs.

    Fixes: the two class attributes previously collided on one obfuscated name
    (losing ``model_type``); ``__init__`` locals are bound to ``self``; the
    serializer is named ``to_dict`` again so it actually overrides
    ``PretrainedConfig.to_dict`` (old obfuscated name kept as an alias).
    """

    model_type = """blip-2"""
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        # NOTE(review): `BlipaVisionConfig`/`BlipaQFormerConfig` are the names
        # the original body referenced — confirm they resolve to the two
        # config classes defined above in this module.
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # the Q-Former cross-attends over vision features, so its encoder
        # width must match the vision hidden size
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def snake_case_ (cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from the three sub-config objects."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding each sub-config into a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output

    # keep the old (shadowing) instance-level name working for any caller
    snake_case_ = to_dict
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
# Module-wide RNG used when no explicit rng is passed to the helper below.
lowerCAmelCase = random.Random()


def __SCREAMING_SNAKE_CASE ( shape , scale=1.0 , rng=None , name=None ):
    """Build a ``shape[0] x shape[1]`` nested list of floats in ``[0, scale)``.

    Fixes: the original repeated one obfuscated parameter name (SyntaxError)
    and read ``rng``/``values`` that were never bound; the real names are
    restored and the module RNG is used as the default source.
    """
    if rng is None:
        rng = lowerCAmelCase
    values = []
    for _ in range(shape[0]):
        values.append([rng.random() * scale for _ in range(shape[1])])
    return values
class lowerCamelCase ( unittest.TestCase ):
    """Helper that stores TvltFeatureExtractor hyper-parameters and builds dummy inputs.

    Fixes: duplicated obfuscated parameter names in ``__init__`` (SyntaxError),
    locals never bound to ``self``, and the two methods that had collapsed onto
    the single name ``A`` — the feat-extract-dict builder is recovered under
    ``prepare_feat_extract_dict`` while ``A`` keeps the (previously effective)
    input-preparation behavior.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between consecutive dummy input lengths so the batch spans
        # min_seq_length .. max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """Return the kwargs dict used to construct a TvltFeatureExtractor."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def A(self, equal_length=False, numpify=False):
        """Create a batch of dummy float speech inputs.

        With *equal_length* every input has ``max_seq_length`` frames; otherwise
        lengths increase strictly from ``min_seq_length``. With *numpify* each
        input is converted to a numpy array.
        """
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
    """Unit tests for TvltFeatureExtractor.

    NOTE(review): the source transformation collapsed every test method onto
    the single name ``A`` (so only the last definition survives and unittest
    discovers none of them as tests) and bound every local to
    ``__UpperCAmelCase`` while the bodies read the intended names
    (``feat_extract_first``, ``speech_inputs``, ``encoded_audios``, ...), so
    each body raises NameError as written. The comments below describe the
    intended behavior only.
    """
    # feature-extraction class under test (attribute name mangled)
    _lowerCAmelCase : Optional[int] = TvltFeatureExtractor
    def A( self):
        # intended setUp: build the tester helper
        # (``TvltFeatureExtractionTester`` is not defined under that name here
        # — TODO confirm the target class)
        __UpperCAmelCase : Dict = TvltFeatureExtractionTester(self)
    def A( self):
        # intended: the extractor exposes every configured property
        __UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(lowercase__ , '''spectrogram_length'''))
        self.assertTrue(hasattr(lowercase__ , '''feature_size'''))
        self.assertTrue(hasattr(lowercase__ , '''num_audio_channels'''))
        self.assertTrue(hasattr(lowercase__ , '''hop_length'''))
        self.assertTrue(hasattr(lowercase__ , '''chunk_length'''))
        self.assertTrue(hasattr(lowercase__ , '''sampling_rate'''))
    def A( self):
        # intended: save_pretrained / from_pretrained round-trip preserves config
        __UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            __UpperCAmelCase : str = feat_extract_first.save_pretrained(lowercase__)[0]
            check_json_file_has_correct_format(lowercase__)
            __UpperCAmelCase : Optional[int] = self.feature_extraction_class.from_pretrained(lowercase__)
        __UpperCAmelCase : List[Any] = feat_extract_first.to_dict()
        __UpperCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
        __UpperCAmelCase : Union[str, Any] = dict_first.pop('''mel_filters''')
        __UpperCAmelCase : Union[str, Any] = dict_second.pop('''mel_filters''')
        self.assertTrue(np.allclose(lowercase__ , lowercase__))
        self.assertEqual(lowercase__ , lowercase__)
    def A( self):
        # intended: to_json_file / from_json_file round-trip preserves config
        __UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            __UpperCAmelCase : Union[str, Any] = os.path.join(lowercase__ , '''feat_extract.json''')
            feat_extract_first.to_json_file(lowercase__)
            __UpperCAmelCase : str = self.feature_extraction_class.from_json_file(lowercase__)
        __UpperCAmelCase : Any = feat_extract_first.to_dict()
        __UpperCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
        __UpperCAmelCase : Tuple = dict_first.pop('''mel_filters''')
        __UpperCAmelCase : List[str] = dict_second.pop('''mel_filters''')
        self.assertTrue(np.allclose(lowercase__ , lowercase__))
        self.assertEqual(lowercase__ , lowercase__)
    def A( self):
        # Initialize feature_extractor
        __UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        __UpperCAmelCase : Optional[int] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
        __UpperCAmelCase : int = [np.asarray(lowercase__) for speech_input in speech_inputs]
        # Test not batched input
        __UpperCAmelCase : Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        __UpperCAmelCase : List[str] = feature_extractor(lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        __UpperCAmelCase : Tuple = feature_extractor(
            lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=lowercase__).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        __UpperCAmelCase : Any = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        __UpperCAmelCase : Optional[Any] = np.asarray(lowercase__)
        __UpperCAmelCase : Tuple = feature_extractor(lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def A( self , lowercase__):
        # intended helper: load the first `num_samples` librispeech clips
        __UpperCAmelCase : Optional[int] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''')
        # automatic decoding with librispeech
        __UpperCAmelCase : int = ds.sort('''id''').select(range(lowercase__))[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def A( self):
        # intended integration check: exact output shape and a value slice
        __UpperCAmelCase : Optional[Any] = self._load_datasamples(1)
        __UpperCAmelCase : Tuple = TvltFeatureExtractor()
        __UpperCAmelCase : Tuple = feature_extractor(lowercase__ , return_tensors='''pt''').audio_values
        self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8))
        __UpperCAmelCase : int = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowercase__ , atol=1e-4))
| 462
| 0
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __A :
    """Builds tiny EsmFold configs and inputs for the model tests below.

    Fixes: the duplicated obfuscated parameter name in ``__init__``
    (SyntaxError), locals never bound to ``self``, and the four methods that
    had collapsed onto one name — their real names are restored so the sibling
    test class's calls (``prepare_config_and_inputs``,
    ``create_and_check_model``) resolve again.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a small folding config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # vocab_size=33 is fixed: EsmFold requires the full ESM residue vocabulary
        return EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the folding model and check positions/angles output shapes."""
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Model tests for EsmFold.

    NOTE(review): heavily damaged by the source transformation — the base
    class is listed twice (``TypeError: duplicate base class`` at class
    creation), all five class attributes collide on one name, every method is
    named ``_snake_case`` (so unittest discovers no tests and ``setUp`` never
    runs), and locals are assigned to ``lowerCamelCase__`` while the bodies
    read ``self.model_tester``/``self.config_tester``. ``EsmFoldModelTester``
    is not defined under that name in this module. Comments describe intent.
    """
    UpperCamelCase :str = False
    UpperCamelCase :Union[str, Any] = (EsmForProteinFolding,) if is_torch_available() else ()
    UpperCamelCase :Optional[Any] = ()
    UpperCamelCase :str = {} if is_torch_available() else {}
    UpperCamelCase :str = False
    def _snake_case (self ):
        # intended setUp: build the model tester and a ConfigTester
        lowerCamelCase__ : Dict = EsmFoldModelTester(self )
        lowerCamelCase__ : str = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
    def _snake_case (self ):
        # intended: run the common config sanity checks
        self.config_tester.run_common_tests()
    def _snake_case (self ):
        # intended: build inputs and run the shape checks
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )
    @unittest.skip("""Does not support attention outputs""" )
    def _snake_case (self ):
        pass
    @unittest.skip
    def _snake_case (self ):
        pass
    @unittest.skip("""Esm does not support embedding resizing""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""Esm does not support embedding resizing""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold does not support passing input embeds!""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold does not support head pruning.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold only has one output format.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold does not support input chunking.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""ESMFold doesn't support data parallel.""" )
    def _snake_case (self ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def _snake_case (self ):
        pass
@require_torch
class __A ( UpperCAmelCase__ ):
    @slow
    def _snake_case (self ):
        """Intended slow integration test: load facebook/esmfold_v1, fold one
        sequence and compare a slice of the predicted atom positions."""
        # NOTE(review): every value below is assigned to `lowerCamelCase__`
        # and then read back under the intended names (`model`,
        # `position_outputs`), so this raises NameError as written. Also
        # `torch.floataa` is an obfuscated dtype attribute — confirm whether
        # float32 or float64 was intended before fixing.
        lowerCamelCase__ : List[Any] = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
        model.eval()
        lowerCamelCase__ : str = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        lowerCamelCase__ : Tuple = model(lowerCamelCase__ )["positions"]
        lowerCamelCase__ : List[Any] = torch.tensor([2.58_28, 0.79_93, -10.9334] , dtype=torch.floataa )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowerCamelCase__ , atol=1E-4 ) )
| 718
|
def _A (a : int , b : int ) -> int:
    """Iterative Euclidean greatest common divisor of *a* and *b*.

    (Fix: the original declared the same obfuscated name for both parameters —
    a SyntaxError — and its swap read names that were never bound.)
    """
    while b:
        a, b = b, a % b
    return a
def _A (a : int , b : int ) -> int:
    """Recursive Euclidean greatest common divisor of *a* and *b*.

    The recursion goes through a local helper because the module-level name
    this function was obfuscated to is shared with its siblings, so a direct
    self-call by module lookup would resolve to the wrong definition.
    """
    def _gcd (x : int , y : int ) -> int:
        return x if y == 0 else _gcd(y, x % y)

    return _gcd(a, b)
def _A () -> None:
    """Demo: print gcd results from both implementations.

    NOTE(review): `euclidean_gcd`, `euclidean_gcd_recursive` and `main` are
    not defined under those names in this (name-mangled) module, so both the
    body and the guard below raise NameError as written — presumably they
    should target the two `_A` gcd helpers above; confirm before wiring up.
    """
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
    main()
| 96
| 0
|
# Lazy-import scaffolding for the Jukebox model (standard HF ``__init__`` pattern).
# Fixes: the original bound the import structure, the torch-only additions and
# the lazy module all to one clobbered name, so _LazyModule received an
# undefined `_import_structure` and the `sys.modules` installation (the only
# use of the otherwise-dead `import sys`) was lost.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    '''configuration_jukebox''': [
        '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''JukeboxConfig''',
        '''JukeboxPriorConfig''',
        '''JukeboxVQVAEConfig''',
    ],
    '''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling classes exist only when torch is installed
    _import_structure['''modeling_jukebox'''] = [
        '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''JukeboxModel''',
        '''JukeboxPreTrainedModel''',
        '''JukeboxVQVAE''',
        '''JukeboxPrior''',
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # replace this module with a lazy proxy so submodules import on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCamelCase ( _lowercase ) -> str:
    """Compute *_lowercase* decimal digits of pi via the Chudnovsky algorithm.

    Fixes: the original checked ``isinstance(x, x)``, never set the Decimal
    context precision, and bound every loop variable to one obfuscated name
    while the body read ``precision``/``linear_term``/``exponential_term``/...
    """
    if not isinstance(_lowercase , int ):
        raise TypeError('Undefined for non-integers' )
    elif _lowercase < 1:
        raise ValueError('Undefined for non-natural numbers' )

    # work at the requested decimal precision
    getcontext().prec = _lowercase
    # each Chudnovsky term contributes ~14 digits
    num_iterations = ceil(_lowercase / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    # drop the last (possibly inaccurate) digit
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    _snake_case = 50
    # call the Chudnovsky routine under its (mangled) module name; the
    # original referenced the undefined names `pi` and `n`
    print(F"The first {_snake_case} digits of pi is: {__lowerCamelCase(_snake_case)}")
| 282
| 1
|
# Lookup table mapping each decimal digit character to its fifth power.
# (Fix: the original `Dict` annotation referenced an unimported typing name,
# which raises NameError the moment the module is executed.)
_lowercase: dict = {str(digit): digit**5 for digit in range(1_0)}
def _lowerCamelCase ( snake_case ):
    """Sum of the fifth powers of the decimal digits of *snake_case*.

    (Fix: the original read the undefined name ``DIGITS_FIFTH_POWER`` instead
    of the module-level table above.)
    """
    return sum(_lowercase[digit] for digit in str(snake_case ) )
def _lowerCamelCase ( ):
    """Project Euler 30: sum of all numbers equal to the sum of the fifth
    powers of their digits (searched over 1_000 .. 1_000_000).

    The digit-power sum is inlined because the original called the undefined
    name ``digits_fifth_powers_sum`` and the file's helper shares this very
    function's obfuscated name, so a module-level call would self-recurse.
    """
    return sum(
        number
        for number in range(1_000 , 1_000_000 )
        if number == sum(int(digit) ** 5 for digit in str(number ) ) )
if __name__ == "__main__":
    # the solver's name was mangled away from `solution`; call it directly
    print(_lowerCamelCase())
| 715
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ =(UniPCMultistepScheduler,)
UpperCamelCase__ =(("num_inference_steps", 2_5),)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **lowercase__ : Dict ):
_lowerCAmelCase = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**lowercase__ )
return config
    def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : List[str]=0 , **lowercase__ : Union[str, Any] ):
        # NOTE(review): intended behavior — save a configured scheduler,
        # reload it, and verify both produce identical step() outputs. As
        # written it is broken: the parameter name is declared twice
        # (SyntaxError) and `kwargs`, `sample`, `residual`,
        # `dummy_past_residuals`, `scheduler`, `new_scheduler`, `time_step`,
        # `output`/`new_output` are read but never bound.
        _lowerCAmelCase = dict(self.forward_default_kwargs )
        _lowerCAmelCase = kwargs.pop('num_inference_steps' , lowercase__ )
        _lowerCAmelCase = self.dummy_sample
        _lowerCAmelCase = 0.1 * sample
        _lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            _lowerCAmelCase = self.get_scheduler_config(**lowercase__ )
            _lowerCAmelCase = scheduler_class(**lowercase__ )
            scheduler.set_timesteps(lowercase__ )
            # copy over dummy past residuals
            _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase__ )
                _lowerCAmelCase = scheduler_class.from_pretrained(lowercase__ )
                new_scheduler.set_timesteps(lowercase__ )
                # copy over dummy past residuals
                _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            _lowerCAmelCase , _lowerCAmelCase = sample, sample
            for t in range(lowercase__ , time_step + scheduler.config.solver_order + 1 ):
                _lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
                _lowerCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , lowercase__ : Tuple=0 , **lowercase__ : List[Any] ):
        # NOTE(review): intended — round-trip a scheduler through
        # save_config/from_pretrained and compare one step() output. Broken as
        # written for the same reasons as the method above (duplicate
        # parameter name, unbound locals read by the body).
        _lowerCAmelCase = dict(self.forward_default_kwargs )
        _lowerCAmelCase = kwargs.pop('num_inference_steps' , lowercase__ )
        _lowerCAmelCase = self.dummy_sample
        _lowerCAmelCase = 0.1 * sample
        _lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            _lowerCAmelCase = self.get_scheduler_config()
            _lowerCAmelCase = scheduler_class(**lowercase__ )
            scheduler.set_timesteps(lowercase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase__ )
                _lowerCAmelCase = scheduler_class.from_pretrained(lowercase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowercase__ )
                # copy over dummy past residual (must be after setting timesteps)
                _lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            _lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            _lowerCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
        assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : int=None , **lowercase__ : List[Any] ):
        # NOTE(review): intended full_loop helper — run 10 denoising steps and
        # return the final sample. Broken: `scheduler`, `scheduler_class`,
        # `model`, `sample` are read but every assignment targets the one
        # mangled name `_lowerCAmelCase`.
        if scheduler is None:
            _lowerCAmelCase = self.scheduler_classes[0]
            _lowerCAmelCase = self.get_scheduler_config(**lowercase__ )
            _lowerCAmelCase = scheduler_class(**lowercase__ )
        _lowerCAmelCase = self.scheduler_classes[0]
        _lowerCAmelCase = self.get_scheduler_config(**lowercase__ )
        _lowerCAmelCase = scheduler_class(**lowercase__ )
        _lowerCAmelCase = 10
        _lowerCAmelCase = self.dummy_model()
        _lowerCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase__ )
        for i, t in enumerate(scheduler.timesteps ):
            _lowerCAmelCase = model(lowercase__ , lowercase__ )
            _lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
        return sample
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        # NOTE(review): intended — check that two consecutive step() calls at
        # neighboring timesteps produce outputs with the input's shape.
        # Broken: `kwargs`, `num_inference_steps`, `sample`, `residual`,
        # `dummy_past_residuals`, `output_a` are read but never bound.
        _lowerCAmelCase = dict(self.forward_default_kwargs )
        _lowerCAmelCase = kwargs.pop('num_inference_steps' , lowercase__ )
        for scheduler_class in self.scheduler_classes:
            _lowerCAmelCase = self.get_scheduler_config()
            _lowerCAmelCase = scheduler_class(**lowercase__ )
            _lowerCAmelCase = self.dummy_sample
            _lowerCAmelCase = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowercase__ , 'set_timesteps' ):
                scheduler.set_timesteps(lowercase__ )
            elif num_inference_steps is not None and not hasattr(lowercase__ , 'set_timesteps' ):
                _lowerCAmelCase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            _lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            _lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            _lowerCAmelCase = scheduler.timesteps[5]
            _lowerCAmelCase = scheduler.timesteps[6]
            _lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
            _lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : str ):
    """Switching between scheduler classes via `from_config` should give identical results.

    NOTE(review): decompiled — assignment targets collapsed to ``_lowerCAmelCase``;
    ``scheduler`` and ``result_mean`` are read without a visible binding.
    """
    # make sure that iterating over schedulers with same config names gives same results
    # for defaults
    _lowerCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
    _lowerCAmelCase = self.full_loop(scheduler=lowercase__ )
    _lowerCAmelCase = torch.mean(torch.abs(lowercase__ ) )
    assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
    # Round-trip through several scheduler classes sharing a config layout.
    _lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
    _lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
    _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
    _lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
    _lowerCAmelCase = self.full_loop(scheduler=lowercase__ )
    _lowerCAmelCase = torch.mean(torch.abs(lowercase__ ) )
    assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
    """Exercise config checks over a range of `num_train_timesteps` values.

    NOTE(review): ``lowercase__`` is referenced throughout these tests without
    a visible binding — decompilation artifact (the loop variables were
    presumably the intended arguments).
    """
    for timesteps in [25, 50, 1_00, 9_99, 10_00]:
        self.check_over_configs(num_train_timesteps=lowercase__ )

def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
    """Exercise thresholding-related config combinations."""
    self.check_over_configs(thresholding=lowercase__ )
    for order in [1, 2, 3]:
        for solver_type in ["bh1", "bh2"]:
            for threshold in [0.5, 1.0, 2.0]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        thresholding=lowercase__ , prediction_type=lowercase__ , sample_max_value=lowercase__ , solver_order=lowercase__ , solver_type=lowercase__ , )

def SCREAMING_SNAKE_CASE__ ( self : Dict ):
    """Exercise the supported `prediction_type` values."""
    for prediction_type in ["epsilon", "v_prediction"]:
        self.check_over_configs(prediction_type=lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
    """Check solver order/type/prediction-type combinations produce finite samples.

    NOTE(review): decompiled — ``lowercase__`` is referenced without a visible
    binding; reconstructed indentation places the full-loop check inside the
    innermost loop, matching the variables it reads.
    """
    for solver_type in ["bh1", "bh2"]:
        for order in [1, 2, 3]:
            for prediction_type in ["epsilon", "sample"]:
                self.check_over_configs(
                    solver_order=lowercase__ , solver_type=lowercase__ , prediction_type=lowercase__ , )
                _lowerCAmelCase = self.full_loop(
                    solver_order=lowercase__ , solver_type=lowercase__ , prediction_type=lowercase__ , )
                assert not torch.isnan(lowercase__ ).any(), "Samples have nan numbers"

def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
    """Exercise both settings of `lower_order_final`."""
    self.check_over_configs(lower_order_final=lowercase__ )
    self.check_over_configs(lower_order_final=lowercase__ )

def SCREAMING_SNAKE_CASE__ ( self : Dict ):
    """Exercise forward checks over a range of inference-step counts."""
    for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
        self.check_over_forward(num_inference_steps=lowercase__ , time_step=0 )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
    """Full loop with default config; pin the mean of the final sample."""
    _lowerCAmelCase = self.full_loop()
    _lowerCAmelCase = torch.mean(torch.abs(lowercase__ ) )
    assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3

def SCREAMING_SNAKE_CASE__ ( self : int ):
    """Full loop with v-prediction; pin the mean of the final sample."""
    _lowerCAmelCase = self.full_loop(prediction_type='v_prediction' )
    _lowerCAmelCase = torch.mean(torch.abs(lowercase__ ) )
    assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : str ):
    """Run the denoising loop on a half-precision sample and check the dtype survives.

    NOTE(review): decompiled — assignment targets collapsed to ``_lowerCAmelCase``;
    ``torch.floataa`` is a mangled dtype name (presumably ``torch.float16``
    given the ``.half()`` above — confirm upstream).
    """
    _lowerCAmelCase = self.scheduler_classes[0]
    _lowerCAmelCase = self.get_scheduler_config(thresholding=lowercase__ , dynamic_thresholding_ratio=0 )
    _lowerCAmelCase = scheduler_class(**lowercase__ )
    _lowerCAmelCase = 10
    _lowerCAmelCase = self.dummy_model()
    _lowerCAmelCase = self.dummy_sample_deter.half()
    scheduler.set_timesteps(lowercase__ )
    for i, t in enumerate(scheduler.timesteps ):
        _lowerCAmelCase = model(lowercase__ , lowercase__ )
        _lowerCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
    assert sample.dtype == torch.floataa
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **lowercase__ : List[Any] ):
    """Setting the full training schedule should yield all-unique timesteps per class."""
    for scheduler_class in self.scheduler_classes:
        _lowerCAmelCase = self.get_scheduler_config(**lowercase__ )
        _lowerCAmelCase = scheduler_class(**lowercase__ )
        scheduler.set_timesteps(scheduler.config.num_train_timesteps )
        assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 225
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowercase ( __lowercase ):
    """Processor combining a SpeechT5 feature extractor and tokenizer.

    NOTE(review): decompiled source — every local assignment target was
    collapsed to ``__snake_case``, so names such as ``audio``, ``text``,
    ``inputs``, ``targets``, ``labels`` and ``decoder_attention_mask`` are read
    without a visible binding, and several assignments (e.g.
    ``__snake_case = labels``) have lost their original left-hand side
    (upstream this is ``inputs["labels"] = labels``).  The two class
    attributes below also share one mangled name, so the second overwrites the
    first.  Indentation is a best-effort reconstruction.
    """

    # Component class names resolved by ProcessorMixin (mangled attribute names).
    _SCREAMING_SNAKE_CASE : str = "SpeechT5FeatureExtractor"
    _SCREAMING_SNAKE_CASE : int = "SpeechT5Tokenizer"

    def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict:
        """Store the feature extractor and tokenizer on the mixin."""
        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def __call__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
        """Route `audio`/`text` to the input side and `audio_target`/`text_target` to labels.

        Raises ValueError when mutually exclusive inputs are combined or when
        nothing to process is supplied.
        """
        __snake_case = kwargs.pop('audio' , SCREAMING_SNAKE_CASE_ )
        __snake_case = kwargs.pop('text' , SCREAMING_SNAKE_CASE_ )
        __snake_case = kwargs.pop('text_target' , SCREAMING_SNAKE_CASE_ )
        __snake_case = kwargs.pop('audio_target' , SCREAMING_SNAKE_CASE_ )
        __snake_case = kwargs.pop('sampling_rate' , SCREAMING_SNAKE_CASE_ )
        # `audio`/`text` and `audio_target`/`text_target` are mutually exclusive pairs.
        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
        # Encode the model-input side (feature extractor for audio, tokenizer for text).
        if audio is not None:
            __snake_case = self.feature_extractor(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        elif text is not None:
            __snake_case = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        else:
            __snake_case = None
        # Encode the label side.
        if audio_target is not None:
            __snake_case = self.feature_extractor(audio_target=SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
            __snake_case = targets['input_values']
        elif text_target is not None:
            __snake_case = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
            __snake_case = targets['input_ids']
        else:
            __snake_case = None
        if inputs is None:
            return targets
        # Attach labels (and the targets' attention mask as decoder mask) to the inputs.
        if targets is not None:
            __snake_case = labels
            __snake_case = targets.get('attention_mask' )
            if decoder_attention_mask is not None:
                __snake_case = decoder_attention_mask
        return inputs

    def a ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Dict ) -> Any:
        """Pad `input_values`/`input_ids`/`labels` via the matching component's `pad`.

        Raises ValueError when both `input_values` and `input_ids` are given
        or when nothing to pad is supplied.
        """
        __snake_case = kwargs.pop('input_values' , SCREAMING_SNAKE_CASE_ )
        __snake_case = kwargs.pop('input_ids' , SCREAMING_SNAKE_CASE_ )
        __snake_case = kwargs.pop('labels' , SCREAMING_SNAKE_CASE_ )
        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
        if input_values is not None:
            __snake_case = self.feature_extractor.pad(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        elif input_ids is not None:
            __snake_case = self.tokenizer.pad(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        else:
            __snake_case = None
        if labels is not None:
            # Token labels are padded by the tokenizer; spectrogram labels by the
            # feature extractor (with a temporary feature-size swap — the
            # "feature_size_hack" restored below).
            if "input_ids" in labels or (isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and "input_ids" in labels[0]):
                __snake_case = self.tokenizer.pad(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
                __snake_case = targets['input_ids']
            else:
                __snake_case = self.feature_extractor.feature_size
                __snake_case = self.feature_extractor.num_mel_bins
                __snake_case = self.feature_extractor.pad(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
                __snake_case = feature_size_hack
                __snake_case = targets['input_values']
        else:
            __snake_case = None
        if inputs is None:
            return targets
        if targets is not None:
            __snake_case = labels
            __snake_case = targets.get('attention_mask' )
            if decoder_attention_mask is not None:
                __snake_case = decoder_attention_mask
        return inputs

    def a ( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def a ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 56
|
from __future__ import annotations
def _A(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from *position* that stay on an n x n board.

    Args:
        position: current (y, x) square of the knight.
        n: board side length.

    Returns:
        List of in-bounds (y, x) destination squares.

    Fix: the decompiled signature declared both parameters under the same
    name (a SyntaxError); they are restored to ``position`` and ``n``.
    """
    y, x = position
    # All eight L-shaped knight offsets from (y, x).
    candidates = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    # Keep only coordinates that fall inside the board.
    return [(cy, cx) for cy, cx in candidates if 0 <= cy < n and 0 <= cx < n]
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
    """Return True when every cell of the board holds a nonzero value."""
    for row in SCREAMING_SNAKE_CASE__:
        for cell in row:
            if cell == 0:
                return False
    return True
def _A(board: list[list[int]], position: tuple[int, int], curr: int) -> bool:
    """Backtracking step of the open knight's tour.

    Tries each valid knight move from *position*; marks visited squares with
    the running move counter and unwinds on failure.

    Fix: the decompiled signature declared all three parameters under the
    same name (a SyntaxError); restored to ``board``/``position``/``curr``.
    The lost assignment targets were reconstructed as the ``board[y][x]``
    writes that the backtracking pattern requires.

    NOTE(review): ``is_complete``, ``get_valid_pos`` and
    ``open_knight_tour_helper`` are not defined under those names in this
    file (the definitions above were renamed to ``_A``) — the call targets
    still need to be reconciled with the definitions.
    """
    if is_complete(board):
        return True
    for next_pos in get_valid_pos(position, len(board)):
        y, x = next_pos
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, next_pos, curr + 1):
                return True
            # Undo the move before trying the next candidate.
            board[y][x] = 0
    return False
def _A ( SCREAMING_SNAKE_CASE__ : int ):
    """Attempt an open knight's tour on an n x n board and return the solved board.

    Raises:
        ValueError: when no tour exists for the given board size.

    NOTE(review): decompiled — assignment targets were collapsed to
    ``UpperCamelCase``; ``board`` and ``n`` are read without a visible binding
    and ``open_knight_tour_helper`` is not defined under that name in this
    file.  The error message typo "Kight" is in the original (sic).
    """
    UpperCamelCase :List[Any] = [[0 for i in range(SCREAMING_SNAKE_CASE__ )] for j in range(SCREAMING_SNAKE_CASE__ )]
    for i in range(SCREAMING_SNAKE_CASE__ ):
        for j in range(SCREAMING_SNAKE_CASE__ ):
            UpperCamelCase :Tuple = 1
            if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , (i, j) , 1 ):
                return board
            UpperCamelCase :str = 0
    UpperCamelCase :List[Any] = F'''Open Kight Tour cannot be performed on a board of size {n}'''
    raise ValueError(SCREAMING_SNAKE_CASE__ )
# Script entry point: run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 658
| 0
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_A = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( ) -> bool:
    """Return True when SageMaker model parallelism is available and configured.

    Checks, in order:
      * ``SM_HP_MP_PARAMETERS`` parses as JSON and contains ``"partitions"``
        (required for model parallel);
      * ``SM_FRAMEWORK_PARAMS`` parses as JSON with ``sagemaker_mpi_enabled``
        truthy;
      * the ``smdistributed`` module is importable.

    Fix: the decompiled body read the undefined name ``UpperCamelCase`` where
    the freshly-fetched environment values were meant to be used; the local
    variables are restored.  The lost default for the
    ``sagemaker_mpi_enabled`` lookup is reconstructed as ``False`` — confirm
    against upstream transformers.  The ``-> Tuple`` annotation was also an
    artifact; the function returns a bool.
    """
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
# Import-time side effect: initialise SageMaker model parallelism when available.
# NOTE(review): `is_sagemaker_model_parallel_available` is not defined under
# that name in this file (the definition above was renamed) — confirm against
# the upstream module.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
    """Deprecated SageMaker-specific TrainingArguments shim.

    NOTE(review): decompiled — the base class (upstream: ``TrainingArguments``)
    and the warning-category argument were both renamed to
    ``_SCREAMING_SNAKE_CASE``, every method was renamed to ``__magic_name__``
    (so later definitions shadow earlier ones as written), and local
    assignment targets were collapsed to ``a_`` — names such as ``device`` are
    read without a visible binding.
    """

    # Raw mp-specific arguments forwarded by the SageMaker launcher (mangled field name).
    _lowerCamelCase : str = field(
        default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )

    def __magic_name__ ( self ):
        """Post-init hook: run the parent's post-init and emit a deprecation warning."""
        super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , _SCREAMING_SNAKE_CASE , )

    @cached_property
    def __magic_name__ ( self ):
        """Select and initialise the torch device for this run (cpu / smp / smddp / ddp / single-gpu)."""
        logger.info("""PyTorch: setting up devices""" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                """torch.distributed process group is initialized, but local_rank == -1. """
                """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            a_ = torch.device("""cpu""" )
            a_ = 0
        elif is_sagemaker_model_parallel_available():
            a_ = smp.local_rank()
            a_ = torch.device("""cuda""" , _SCREAMING_SNAKE_CASE )
            a_ = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
            a_ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            a_ = torch.device("""cuda""" , self.local_rank )
            a_ = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            a_ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            a_ = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
            a_ = torch.device("""cuda""" , self.local_rank )
            a_ = 1
        if device.type == "cuda":
            torch.cuda.set_device(_SCREAMING_SNAKE_CASE )
        return device

    @property
    def __magic_name__ ( self ):
        """World size: smp data-parallel size when model parallel is available, else parent's."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def __magic_name__ ( self ):
        """Whether inputs should be placed on device (not under model parallel)."""
        return not is_sagemaker_model_parallel_available()

    @property
    def __magic_name__ ( self ):
        # Always False here; presumably a parent-property override — confirm upstream.
        return False
| 721
|
from math import factorial
def __SCREAMING_SNAKE_CASE ( n : int , k : int ) -> int:
    """Return the number of k-combinations of n items (n choose k).

    Args:
        n: total number of items.
        k: size of each combination.

    Raises:
        ValueError: when k < 0 or n < k.

    Fix: the decompiled signature declared both parameters as
    ``UpperCamelCase`` (a SyntaxError) while the body already read ``n`` and
    ``k``; the parameter names are restored.
    """
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
# Script entry point: demo the combinations helper.
# Fix: the decompiled f-strings called the undefined name ``combinations``;
# the calls are routed to the function actually defined above.
if __name__ == "__main__":
    print(
        'The number of five-card hands possible from a standard',
        f'fifty-two card deck is: {__SCREAMING_SNAKE_CASE(52, 5)}\n',
    )
    print(
        'If a class of 40 students must be arranged into groups of',
        f'4 for group projects, there are {__SCREAMING_SNAKE_CASE(40, 4)} ways',
        'to arrange them.\n',
    )
    print(
        'If 10 teams are competing in a Formula One race, there',
        f'are {__SCREAMING_SNAKE_CASE(10, 3)} ways that first, second and',
        'third place can be awarded.',
    )
| 403
| 0
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    # DETR model configuration (mangled class/attribute names — upstream this is
    # `DetrConfig(PretrainedConfig)`).
    UpperCamelCase = '''detr'''
    UpperCamelCase = ['''past_key_values''']
    UpperCamelCase = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__( self : int , _UpperCAmelCase : int=True , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Optional[int]=100 , _UpperCAmelCase : Tuple=6 , _UpperCAmelCase : Dict=2048 , _UpperCAmelCase : Optional[int]=8 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : int=2048 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]="relu" , _UpperCAmelCase : List[str]=256 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Optional[int]=1.0 , _UpperCAmelCase : int=False , _UpperCAmelCase : Any="sine" , _UpperCAmelCase : Union[str, Any]="resnet50" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : int=False , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Any=1 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.1 , **_UpperCAmelCase : Any , ) -> Union[str, Any]:
        '''Validate/resolve the backbone config and store all hyperparameters.

        NOTE(review): decompiled — every parameter was renamed to
        ``_UpperCAmelCase`` and every assignment target to ``UpperCAmelCase_``,
        so the attribute names being set (``use_timm_backbone``, ``d_model``,
        etc., going by the defaults and upstream DetrConfig) are lost.
        '''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                UpperCAmelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                UpperCAmelCase_ = backbone_config.get("model_type" )
                UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
                UpperCAmelCase_ = config_class.from_dict(_UpperCAmelCase )
            # set timm attributes to None
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None, None, None
        UpperCAmelCase_ = use_timm_backbone
        UpperCAmelCase_ = backbone_config
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = num_queries
        UpperCAmelCase_ = d_model
        UpperCAmelCase_ = encoder_ffn_dim
        UpperCAmelCase_ = encoder_layers
        UpperCAmelCase_ = encoder_attention_heads
        UpperCAmelCase_ = decoder_ffn_dim
        UpperCAmelCase_ = decoder_layers
        UpperCAmelCase_ = decoder_attention_heads
        UpperCAmelCase_ = dropout
        UpperCAmelCase_ = attention_dropout
        UpperCAmelCase_ = activation_dropout
        UpperCAmelCase_ = activation_function
        UpperCAmelCase_ = init_std
        UpperCAmelCase_ = init_xavier_std
        UpperCAmelCase_ = encoder_layerdrop
        UpperCAmelCase_ = decoder_layerdrop
        UpperCAmelCase_ = encoder_layers
        UpperCAmelCase_ = auxiliary_loss
        UpperCAmelCase_ = position_embedding_type
        UpperCAmelCase_ = backbone
        UpperCAmelCase_ = use_pretrained_backbone
        UpperCAmelCase_ = dilation
        # Hungarian matcher
        UpperCAmelCase_ = class_cost
        UpperCAmelCase_ = bbox_cost
        UpperCAmelCase_ = giou_cost
        # Loss coefficients
        UpperCAmelCase_ = mask_loss_coefficient
        UpperCAmelCase_ = dice_loss_coefficient
        UpperCAmelCase_ = bbox_loss_coefficient
        UpperCAmelCase_ = giou_loss_coefficient
        UpperCAmelCase_ = eos_coefficient
        super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )

    @property
    def lowercase__ ( self : List[Any] ) -> int:
        '''Alias for the number of encoder attention heads.'''
        return self.encoder_attention_heads

    @property
    def lowercase__ ( self : List[Any] ) -> int:
        '''Alias for the model hidden size.'''
        return self.d_model

    @classmethod
    def lowercase__ ( cls : Dict , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Tuple ) -> Any:
        '''Alternate constructor: build a config from a backbone config.'''
        return cls(backbone_config=_UpperCAmelCase , **_UpperCAmelCase )

    def lowercase__ ( self : Optional[int] ) -> Dict[str, any]:
        '''Serialize this config (and any nested backbone config) to a plain dict.'''
        UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            UpperCAmelCase_ = self.backbone_config.to_dict()
        UpperCAmelCase_ = self.__class__.model_type
        return output
class lowercase__ ( SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    # ONNX export configuration for DETR (mangled names — upstream `DetrOnnxConfig`).
    UpperCamelCase = version.parse('''1.11''' )

    @property
    def lowercase__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
        '''Dynamic-axis spec for the exported inputs.'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )

    @property
    def lowercase__ ( self : List[Any] ) -> float:
        '''Absolute tolerance used when validating the ONNX export.'''
        return 1e-5

    @property
    def lowercase__ ( self : List[str] ) -> int:
        '''Default ONNX opset for the export.'''
        return 12
| 82
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
    """Model tester for TF Blenderbot: builds tiny configs/inputs and checks decoder caching.

    NOTE(review): decompiled — local assignment targets were collapsed to
    ``UpperCAmelCase_`` in several places (e.g. the ``use_cache`` flag at the
    end of ``__init__``-style setup), so some intended attribute names are lost.
    """

    __A : Dict =BlenderbotConfig
    __A : Union[str, Any] ={}
    __A : Any ="gelu"

    def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
        """Store the tiny-model hyperparameters used to build test configs."""
        UpperCAmelCase_ : List[Any] = parent
        UpperCAmelCase_ : str = batch_size
        UpperCAmelCase_ : Dict = seq_length
        UpperCAmelCase_ : int = is_training
        UpperCAmelCase_ : Optional[Any] = use_labels
        UpperCAmelCase_ : Any = vocab_size
        UpperCAmelCase_ : Optional[int] = hidden_size
        UpperCAmelCase_ : Optional[int] = num_hidden_layers
        UpperCAmelCase_ : int = num_attention_heads
        UpperCAmelCase_ : Tuple = intermediate_size
        UpperCAmelCase_ : Any = hidden_dropout_prob
        UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
        UpperCAmelCase_ : List[Any] = max_position_embeddings
        UpperCAmelCase_ : str = eos_token_id
        UpperCAmelCase_ : List[Any] = pad_token_id
        UpperCAmelCase_ : List[Any] = bos_token_id

    def UpperCamelCase__ ( self ):
        """Build a tiny config plus a matching inputs dict (ids end with EOS)."""
        UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
        UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
        UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
        UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase_ : Optional[Any] = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
        return config, inputs_dict

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
        """Check decoder past-key-values: cached and uncached forward passes agree on a random slice."""
        UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder()
        UpperCAmelCase_ : int = inputs_dict["input_ids"]
        UpperCAmelCase_ : Dict = input_ids[:1, :]
        UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :]
        UpperCAmelCase_ : int = inputs_dict["head_mask"]
        UpperCAmelCase_ : Optional[int] = 1
        # first forward pass
        UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
        # append to next input_ids and
        UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
        UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
        UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0]
        UpperCAmelCase_ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,past_key_values=_snake_case )[0]
        self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
        # select random slice
        UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
        UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
        UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]:
    """Fill in default attention/head masks for a Blenderbot test inputs dict.

    NOTE(review): decompiled — all parameters share the mangled name
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError as written) and assignment
    targets were collapsed to ``UpperCAmelCase_``; by the returned dict the
    intended parameters are config, input_ids, decoder_input_ids and the
    optional masks.  ``tf.inta`` is a mangled dtype name.
    """
    if attention_mask is None:
        # Mask out padding tokens in the encoder input.
        UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # Always attend to the decoder start token; mask padding afterwards.
        UpperCAmelCase_ : Optional[int] = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
    """Common-suite test case for TF Blenderbot models.

    NOTE(review): decompiled — the two mixin base classes were renamed to
    ``__SCREAMING_SNAKE_CASE`` (upstream: TFModelTesterMixin and
    PipelineTesterMixin) and the class attributes lost their names.
    """

    __A : Union[str, Any] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    __A : Dict =(
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __A : Any =True
    __A : Dict =False
    __A : Dict =False

    def UpperCamelCase__ ( self ):
        """Set up the model tester and config tester."""
        UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self )
        UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case )

    def UpperCamelCase__ ( self ):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def UpperCamelCase__ ( self ):
        """Run the decoder past-key-values check with freshly-built inputs."""
        UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_tokenizers
@require_tf
class _snake_case (unittest.TestCase):
    """Slow integration test: generate a reply with the 400M distilled Blenderbot."""

    __A : Optional[int] =["My friends are cool but they eat too many carbs."]
    __A : Optional[Any] ="facebook/blenderbot-400M-distill"

    @cached_property
    def UpperCamelCase__ ( self ):
        """Lazily load the pretrained tokenizer."""
        return BlenderbotTokenizer.from_pretrained(self.model_name )

    @cached_property
    def UpperCamelCase__ ( self ):
        """Lazily load the pretrained seq2seq model."""
        UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def UpperCamelCase__ ( self ):
        """Generate from the canned prompt and compare against the expected reply."""
        UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" )
        UpperCAmelCase_ : Union[str, Any] = self.model.generate(
            model_inputs.input_ids ,)
        UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 71
| 0
|
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A__ : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __magic_name__ ( datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    NOTE(review): decompiled — all field names were collapsed to
    ``UpperCamelCase_`` (upstream these are ``features``, ``encoding``,
    ``encoding_errors``, ``field``, ``use_threads``, ``block_size``,
    ``chunksize`` and ``newlines_in_values``), so only the defaults survive
    and later duplicates overwrite earlier ones as written.
    """

    UpperCamelCase_ = None
    UpperCamelCase_ = "utf-8"
    UpperCamelCase_ = None
    UpperCamelCase_ = None
    UpperCamelCase_ = True  # deprecated
    UpperCamelCase_ = None  # deprecated
    UpperCamelCase_ = 10 << 20  # 10MB
    UpperCamelCase_ = None
class __magic_name__ ( datasets.ArrowBasedBuilder ):
UpperCamelCase_ = JsonConfig
def lowercase_ ( self ) -> str:
    """Validate deprecated config options and build the `DatasetInfo`.

    NOTE(review): the ``-> str`` annotation is a decompilation artifact —
    the method returns a ``datasets.DatasetInfo``.
    """
    if self.config.block_size is not None:
        logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
        # Map the deprecated block_size onto the current chunk-size setting
        # (assignment target lost in decompilation).
        _lowercase: List[str] = self.config.block_size
    if self.config.use_threads is not True:
        logger.warning(
            '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
    if self.config.newlines_in_values is not None:
        raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
    return datasets.DatasetInfo(features=self.config.features )
def lowercase_ ( self , A_ ) -> Any:
    """Download the configured data files and build one SplitGenerator per split.

    A bare str/list/tuple of files becomes a single TRAIN split; a mapping
    becomes one split per key.

    NOTE(review): decompiled — the parameter (upstream: ``dl_manager``) and
    all locals were renamed to ``A_``/``_lowercase``, so names such as
    ``dl_manager``, ``data_files``, ``files`` and ``splits`` are read without
    a visible binding.
    """
    if not self.config.data_files:
        raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
    _lowercase: int = dl_manager.download_and_extract(self.config.data_files )
    if isinstance(A_ , (str, list, tuple) ):
        _lowercase: Tuple = data_files
        if isinstance(A_ , A_ ):
            _lowercase: Optional[Any] = [files]
        _lowercase: Dict = [dl_manager.iter_files(A_ ) for file in files]
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
    _lowercase: str = []
    for split_name, files in data_files.items():
        if isinstance(A_ , A_ ):
            _lowercase: Optional[Any] = [files]
        _lowercase: str = [dl_manager.iter_files(A_ ) for file in files]
        splits.append(datasets.SplitGenerator(name=A_ , gen_kwargs={'''files''': files} ) )
    return splits
def lowercase_ ( self , A_ ) -> pa.Table:
    """Cast a pyarrow table to the configured features schema.

    Missing columns are appended as all-null arrays before the cast.

    NOTE(review): decompiled — the parameter (upstream: ``pa_table``) and
    locals were renamed to ``A_``/``_lowercase``; ``pa_table`` is read
    without a visible binding.
    """
    if self.config.features is not None:
        # adding missing columns
        for column_name in set(self.config.features ) - set(pa_table.column_names ):
            _lowercase: Any = self.config.features.arrow_schema.field(A_ ).type
            _lowercase: Optional[Any] = pa_table.append_column(A_ , pa.array([None] * len(A_ ) , type=A_ ) )
        # more expensive cast to support nested structures with keys in a different order
        # allows str <-> int/float or str to Audio for example
        _lowercase: Optional[int] = table_cast(A_ , self.config.features.arrow_schema )
    return pa_table
def lowercase_ ( self , A_ ) -> str:
    """Yield ``(key, pyarrow.Table)`` pairs read from the JSON files in ``A_``.

    ``A_`` is the iterable of per-split file lists produced by the split
    generators. Two layouts are supported: one JSON document with the records
    under ``self.config.field``, or JSON-lines parsed in chunks with pyarrow.
    """
    for file_idx, file in enumerate(itertools.chain.from_iterable(A_)):
        # If the file is one json object and if we need to look at the list of items in one specific field
        if self.config.field is not None:
            with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                dataset = json.load(f)
            # We keep only the field we are interested in
            dataset = dataset[self.config.field]
            # We accept two formats: a list of dicts or a dict of lists
            if isinstance(dataset, (list, tuple)):
                keys = set().union(*[row.keys() for row in dataset])
                mapping = {col: [row.get(col) for row in dataset] for col in keys}
            else:
                mapping = dataset
            pa_table = pa.Table.from_pydict(mapping)
            yield file_idx, self._cast_table(pa_table)
        # If the file has one json object per line
        else:
            with open(file, 'rb') as f:
                batch_idx = 0
                # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                # Set a default minimum value of 16kB if the chunk size is really small
                block_size = max(self.config.chunksize // 32, 16 << 10)
                encoding_errors = (
                    self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                )
                while True:
                    batch = f.read(self.config.chunksize)
                    if not batch:
                        break
                    # Finish current line so a record is never split across batches
                    try:
                        batch += f.readline()
                    except (AttributeError, io.UnsupportedOperation):
                        batch += readline(f)
                    # PyArrow only accepts utf-8 encoded bytes
                    if self.config.encoding != "utf-8":
                        batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('utf-8')
                    try:
                        while True:
                            try:
                                pa_table = paj.read_json(
                                    io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                break
                            except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                if (
                                    isinstance(e, pa.ArrowInvalid)
                                    and "straddling" not in str(e)
                                    or block_size > len(batch)
                                ):
                                    raise
                                else:
                                    # Increase the block size in case it was too small.
                                    # The block size will be reset for the next file.
                                    logger.debug(
                                        f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                    block_size *= 2
                    except pa.ArrowInvalid as e:
                        # Fallback: try to parse the whole file as a single JSON document
                        try:
                            with open(
                                file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                dataset = json.load(f)
                        except json.JSONDecodeError:
                            logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                            raise e
                        # If possible, parse the file as a list of json objects and exit the loop
                        if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                            try:
                                keys = set().union(*[row.keys() for row in dataset])
                                mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                pa_table = pa.Table.from_pydict(mapping)
                            except (pa.ArrowInvalid, AttributeError) as e:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(f'Not able to read records in the JSON file at {file}.') from None
                            yield file_idx, self._cast_table(pa_table)
                            break
                        else:
                            logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                            raise ValueError(
                                f'Not able to read records in the JSON file at {file}. '
                                f'You should probably indicate the field of the JSON file containing your records. '
                                f'This JSON file contain the following fields: {str(list(dataset.keys()))}. '
                                f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
                    batch_idx += 1
| 272
|
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

# Bug fix: the metric class below consumes _CITATION / _DESCRIPTION /
# _KWARGS_DESCRIPTION (via add_start_docstrings and MetricInfo), but these
# constants had been renamed to throwaway `A__` names.
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
    """COMET machine-translation metric, wrapping the `unbabel-comet` package.

    Bug fixes vs. the previous revision: the three hook methods shared one
    scrambled name (so only the last survived), `_compute` had duplicate
    parameter names (a SyntaxError), and the loaded model was bound to a
    throwaway local instead of `self.scorer`.
    """

    def _info(self):
        """Return metric metadata: input features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://unbabel.github.io/COMET/html/index.html',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'sources': datasets.Value('string', id='sequence'),
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }),
            codebase_urls=['https://github.com/Unbabel/COMET'],
            reference_urls=[
                'https://github.com/Unbabel/COMET',
                'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
                'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Download the COMET checkpoint and keep the model as ``self.scorer``."""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da'))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score `predictions` against `references` given `sources` with COMET.

        Returns a dict with the corpus-level ``mean_score`` and the per-segment
        ``scores`` list.
        """
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        # turn the dict of column lists into a list of per-segment dicts
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 272
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Bug fix: the tokenizer class below consumes VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and the
# module-level `logger`, which had all been renamed to throwaway names.
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}

logger = logging.get_logger(__name__)
class __magic_name__ ( PreTrainedTokenizer ):
    """PEGASUS tokenizer backed by SentencePiece.

    Ids 0..offset-1 are reserved special tokens (pad, eos, the two mask tokens
    and the pretraining-only ``<unk_x>`` tokens); SentencePiece piece ids are
    shifted up by ``offset``.

    Bug fixes vs. the previous revision: the base class name was an undefined
    scrambled identifier (`PreTrainedTokenizer` is what the file imports), the
    `vocab_files_names` attribute was assigned twice, every method shared the
    name `_A` (so only the last survived), and several signatures repeated the
    same parameter name, which is a SyntaxError.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(list)}, but is'
                    f' {type(additional_special_tokens)}')
            # prepend the sentence-mask token if the caller did not include it
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict (id -> token)
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        # reverse mapping (token -> id)
        self.decoder = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        """SentencePiece vocabulary size plus the reserved special-token offset."""
        return len(self.sp_model) + self.offset

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor is not picklable; drop it and reload on setstate
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Tokenize with SentencePiece, returning string pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to its id; SentencePiece ids are shifted by `offset`."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index):
        """Map an id back to its token; ids below `offset` are special tokens."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
            return token

    def convert_tokens_to_string(self, tokens):
        """Join tokens back into a string, decoding sub-tokens with SentencePiece."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """PEGASUS only appends EOS, for single sequences and pairs alike."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask marking special tokens (1) in the given sequence(s)."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append EOS to the sequence (and to the concatenated pair, if given)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 72
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
UpperCamelCase__ : Dict = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None) -> argparse.ArgumentParser:
    """Create (or attach as a sub-command) the argparse parser for `accelerate tpu-config`.

    Args:
        subparsers: optional subparsers action to attach to; when None a
            standalone parser is created.

    Returns:
        The configured ArgumentParser.

    Bug fixes vs. the previous revision: `parser`/`config_args`/`pod_args` were
    assigned to a throwaway local and then read as undefined names; the
    `--config_file` option's `type`/`default` and `set_defaults(func=...)` were
    wired to the wrong object; the name `tpu_command_parser`, which `main`
    calls, had been scrambled away.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")
    if subparsers is not None:
        # dispatch to the launcher when used as an `accelerate` sub-command
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args) -> None:
    """Resolve defaults from the accelerate config file and run the setup
    command on all workers of a TPU pod via `gcloud ... tpu-vm ssh`.

    Bug fixes vs. the previous revision: `defaults`, `new_cmd` and `cmd` were
    assigned to a throwaway local and then read as undefined names;
    `os.path.isfile` was checking the args object instead of
    `default_config_file`; the name `tpu_command_launcher`, which `main` calls,
    had been scrambled away.

    Raises:
        ValueError: if neither a command nor a command file is available.
    """
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main() -> None:
    """CLI entry point: parse command-line args and launch the TPU setup.

    Bug fix: the previous revision called `tpu_command_parser` and
    `tpu_command_launcher`, but all three functions in this module had been
    scrambled to one shared name, so those call targets did not exist.
    """
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 591
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy import structure: maps each submodule to the public names it provides.
# Bug fixes vs. the previous revision: this dict was bound to a throwaway name
# while `_LazyModule` below referenced the undefined `_import_structure`; the
# torch-only model symbols were never registered under `modeling_m2m_100`; the
# lazy module object was never installed into `sys.modules`; and the
# TYPE_CHECKING imports referenced scrambled module/class names.
_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps only load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718
|
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
UpperCAmelCase_ : int = get_logger(__name__)
class VerificationMode(enum.Enum):
    """How thoroughly downloaded/generated data should be verified.

    Restored distinct class/member names: several classes in this module had
    been scrambled to one shared name, so only the last definition survived.
    """

    ALL_CHECKS = 'all_checks'      # verify checksums and split sizes
    BASIC_CHECKS = 'basic_checks'  # verify split sizes only
    NO_CHECKS = 'no_checks'        # skip verification entirely
class ChecksumVerificationException(Exception):
    """Base error for checksum verification failures.

    Restored distinct exception names: `verify_checksums` below raises
    `ExpectedMoreDownloadedFiles`, `UnexpectedDownloadedFile` and
    `NonMatchingChecksumError`, which were undefined after the scrambling
    collapsed these classes onto one shared name with an undefined base.
    """


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A file was downloaded that was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum does not match the expected one."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name: str = None) -> None:
    """Check recorded checksums against the expected ones.

    Args:
        expected_checksums: mapping url -> expected checksum, or None to skip.
        recorded_checksums: mapping url -> actually recorded checksum.
        verification_name: optional label used in messages.

    Raises:
        ExpectedMoreDownloadedFiles: some expected urls were not recorded.
        UnexpectedDownloadedFile: some recorded urls were not expected.
        NonMatchingChecksumError: at least one checksum differs.

    Bug fixes vs. the previous revision: `bad_urls`/`for_verification_name`
    were bound to a throwaway local and the final length check tested a
    parameter instead of the mismatched-url list; parameter names were
    scrambled to a single repeated `_a`.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error for split-size verification failures.

    Restored distinct exception names: `verify_splits` below raises
    `ExpectedMoreSplits`, `UnexpectedSplits` and `NonMatchingSplitsSizesError`,
    which were undefined after the scrambling collapsed these classes onto one
    shared name with an undefined base.
    """


class UnexpectedSplits(SplitsVerificationException):
    """A split was recorded that was not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """At least one split's size does not match the expected size."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    """Check recorded split sizes against the expected ones.

    Args:
        expected_splits: mapping split name -> expected SplitInfo, or None to skip.
        recorded_splits: mapping split name -> recorded SplitInfo.

    Raises:
        ExpectedMoreSplits: some expected splits were not recorded.
        UnexpectedSplits: some recorded splits were not expected.
        NonMatchingSplitsSizesError: `num_examples` differs for some split.

    Bug fixes vs. the previous revision: the mismatch list was bound to a
    throwaway local and the final length check tested a parameter instead of
    it; parameter names were scrambled to a single repeated `_a`.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Return ``{"num_bytes", "checksum"}`` for the file at `path`.

    The checksum is a hex SHA-256 computed in 1 MiB chunks (or None when
    `record_checksum` is False).

    Bug fixes vs. the previous revision: the hasher and the checksum were bound
    to a throwaway local and then read as the undefined names `m`/`checksum`,
    and the hash constructor name was garbled (`shaaaa`).
    """
    from hashlib import sha256  # local import: the module-level hashlib import was garbled

    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size) -> bool:
    """Return True iff `dataset_size` is truthy and below ``config.IN_MEMORY_MAX_SIZE``.

    Bug fix vs. the previous revision: the body read `dataset_size` while the
    parameter had been scrambled to `_a`, making the check a NameError.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 440
| 0
|
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if n == 0:
return 0
UpperCamelCase : List[str] = float("""-inf""" )
for i in range(1 , n + 1 ):
UpperCamelCase : str = max(
SCREAMING_SNAKE_CASE , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE ) )
return max_revue
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase : Any = float("""-inf""" )
for i in range(1 , n + 1 ):
UpperCamelCase : str = max(
SCREAMING_SNAKE_CASE , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
UpperCamelCase : Any = max_revenue
return max_rev[n]
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_enforce_args(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
UpperCamelCase : str = [float("""-inf""" ) for _ in range(n + 1 )]
UpperCamelCase : List[Any] = 0
for i in range(1 , n + 1 ):
UpperCamelCase : str = max_rev[i]
for j in range(1 , i + 1 ):
UpperCamelCase : Union[str, Any] = max(SCREAMING_SNAKE_CASE , prices[j - 1] + max_rev[i - j] )
UpperCamelCase : Any = max_revenue_i
return max_rev[n]
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if n < 0:
UpperCamelCase : List[Any] = f"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(SCREAMING_SNAKE_CASE )
if n > len(SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = (
"""Each integral piece of rod must have a corresponding price. """
f"""Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE )}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
def UpperCamelCase ():
UpperCamelCase : Tuple = [6, 10, 12, 15, 20, 23]
UpperCamelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase : Optional[int] = 36
UpperCamelCase : Optional[int] = top_down_cut_rod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase : str = bottom_up_cut_rod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase : int = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 102
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__magic_name__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__(
    self,
    speech_model,
    speech_processor,
    vae,
    text_encoder,
    tokenizer,
    unet,
    scheduler,
    safety_checker,
    feature_extractor,
):
    """Register the Whisper speech models and the Stable Diffusion components.

    Bug fix: all nine parameters were scrambled to the same name `_A`, which
    is a SyntaxError; restored distinct names matching the keywords passed to
    `register_modules` (note `safety_checker` is checked but, as before, not
    registered).
    """
    super().__init__()
    if safety_checker is None:
        # warn but allow: disabling the safety checker is the user's responsibility
        logger.warning(
            f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
            """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
            """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
            """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
            """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
            """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""")
    self.register_modules(
        speech_model=speech_model,
        speech_processor=speech_processor,
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        unet=unet,
        scheduler=scheduler,
        feature_extractor=feature_extractor,
    )
def enable_attention_slicing(self, slice_size="auto"):
    """Compute attention in chunks of `slice_size` to save memory.

    Bug fixes: with ``"auto"`` the computed half-head-count was discarded and
    the literal string was passed to the UNet; also restored the method name,
    which `disable_attention_slicing` calls.
    """
    if slice_size == "auto":
        # half the attention head count is the conventional memory/speed trade-off
        slice_size = self.unet.config.attention_head_dim // 2
    self.unet.set_attention_slice(slice_size)
def disable_attention_slicing(self):
    """Disable sliced attention (compute attention in one pass again).

    Bug fix: the previous revision passed the undefined name `_A`; passing
    `None` is what turns slicing off.
    """
    self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self , _A , _A=1_6_0_0_0 , _A = 5_1_2 , _A = 5_1_2 , _A = 5_0 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ):
'''simple docstring'''
UpperCamelCase : str = self.speech_processor.feature_extractor(
_A , return_tensors="""pt""" , sampling_rate=_A ).input_features.to(self.device )
UpperCamelCase : List[Any] = self.speech_model.generate(_A , max_length=4_8_0_0_0_0 )
UpperCamelCase : Optional[int] = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A )[
0
]
if isinstance(_A , _A ):
UpperCamelCase : Tuple = 1
elif isinstance(_A , _A ):
UpperCamelCase : List[Any] = len(_A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
# get prompt text embeddings
UpperCamelCase : Dict = self.tokenizer(
_A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : int = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = text_embeddings.shape
UpperCamelCase : Optional[int] = text_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : str = [""""""] * batch_size
elif type(_A ) is not type(_A ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !="""
f""" {type(_A )}.""" )
elif isinstance(_A , _A ):
UpperCamelCase : Tuple = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
UpperCamelCase : Any = negative_prompt
UpperCamelCase : Optional[int] = text_input_ids.shape[-1]
UpperCamelCase : List[str] = self.tokenizer(
_A , padding="""max_length""" , max_length=_A , truncation=_A , return_tensors="""pt""" , )
UpperCamelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : List[Any] = uncond_embeddings.shape[1]
UpperCamelCase : Dict = uncond_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Tuple = torch.randn(_A , generator=_A , device="""cpu""" , dtype=_A ).to(
self.device )
else:
UpperCamelCase : Any = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCamelCase : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : Tuple = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : str = {}
if accepts_eta:
UpperCamelCase : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : str = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
UpperCamelCase : Optional[Any] = self.unet(_A , _A , encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : str = noise_pred.chunk(2 )
UpperCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Any = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
UpperCamelCase : Optional[Any] = 1 / 0.1_82_15 * latents
UpperCamelCase : Union[str, Any] = self.vae.decode(_A ).sample
UpperCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(_A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
| 102
| 1
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __lowercase(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` — an expression in the variable ``x`` — with the
    Newton-Raphson method, starting from the initial guess ``a``.

    The original signature repeated the same parameter name three times (a
    SyntaxError); parameters are restored from how the body uses them.

    :param func: expression in ``x``, e.g. ``"x**2 - 5*x + 2"`` (evaluated with
        ``eval`` — never pass untrusted input).
    :param a: initial guess for the root.
    :param precision: stop once ``|func(x)| < precision``.
    :return: the approximated root as a float.
    """
    x = a  # `eval(func)` below reads this local `x`
    while True:
        # Newton step: x <- x - f(x) / f'(x); Decimal keeps extra precision,
        # sympy's diff() produces the derivative expression from the string.
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
    # NOTE: the solver above is named `__lowercase`; the original called the
    # undefined name `newton_raphson`, which raised NameError at runtime.
    # Find root of trigonometric function (value of pi)
    print(F'''The root of sin(x) = 0 is {__lowercase('sin(x)', 2)}''')
    # Find root of polynomial
    print(F'''The root of x**2 - 5*x + 2 = 0 is {__lowercase('x**2 - 5*x + 2', 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. the value of e)
    print(F'''The root of log(x) - 1 = 0 is {__lowercase('log(x) - 1', 2)}''')
    # Exponential Roots
    print(F'''The root of exp(x) - 1 = 0 is {__lowercase('exp(x) - 1', 0)}''')
| 716
|
import os
from datetime import datetime as dt
from github import Github
SCREAMING_SNAKE_CASE_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def __lowercase() -> None:
    """Close or mark-stale inactive issues on huggingface/accelerate.

    For every open issue: if the last comment is from the bot, the issue is
    untouched for > 7 days and older than 30 days (and carries no exempt
    label), close it; otherwise, after > 23 days of inactivity, post a stale
    warning comment. Requires a ``GITHUB_TOKEN`` environment variable.
    """
    g = Github(os.environ["""GITHUB_TOKEN"""])
    repo = g.get_repo("""huggingface/accelerate""")
    open_issues = repo.get_issues(state="""open""")

    for issue in open_issues:
        # Newest comment first. Fix: the sort key must use the lambda's own
        # parameter (the original lambda took one name but read `i`).
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="""closed""")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored.""" )
if __name__ == "__main__":
    # The worker above is named `__lowercase`; there is no `main` in this file,
    # so the original call raised NameError.
    __lowercase()
| 116
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class a__(unittest.TestCase):
    """Tests for ChineseCLIPProcessor: save/load round-trips and that the
    processor faithfully delegates to its tokenizer and image processor.

    NOTE(review): the obfuscated original assigned every value to the same
    throwaway name (so ``self.tmpdirname`` etc. were never set) and named every
    method ``_lowercase`` (so unittest discovered nothing); names below are
    reconstructed from each value's subsequent use.
    """

    def setUp(self):
        # Throwaway directory holding a tiny BERT vocab + image-processor config.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            '的',
            '价',
            '格',
            '是',
            '15',
            '便',
            'alex',
            '##andra',
            ',',
            '。',
            '-',
            't',
            'shirt',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        image_processor_map = {
            'do_resize': True,
            'size': {'height': 224, 'width': 224},
            'do_center_crop': True,
            'crop_size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
            'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
            'do_convert_rgb': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image (30x400, 3 channels)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)', sep_token='(SEP)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token='(CLS)', sep_token='(SEP)', do_normalize=False )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        # from_pretrained defaults to the fast tokenizer class.
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 538
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Lazily-exported public API; the module/class names are taken from this dict.
# Fix: the dict was assigned to a throwaway name, so the `_import_structure`
# reference below raised NameError, and the lazy module was never installed.
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}

if TYPE_CHECKING:
    # Import eagerly only for static type checkers.
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer is only imported
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 538
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase(unittest.TestCase):
    """Slow integration tests comparing XLM-RoBERTa hidden states against
    reference values (base and large checkpoints).

    Fix: both methods were named ``A__`` — the second silently replaced the
    first and neither was discovered by unittest; locals were all collapsed to
    one name, so ``model(...)`` raised NameError.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]])
        expected_output_shape = torch.Size((1, 12, 7_68))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
        # Reference values were produced with fairseq:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base'); xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]])
        expected_output_shape = torch.Size((1, 12, 10_24))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
        # Reference values were produced with fairseq:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large'); xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 648
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for MgpstrTokenizer (character-level scene-text
    tokenizer).

    Fix: the base class referenced the undefined ``__lowercase`` (the imported
    mixin is TokenizerTesterMixin), all four class attributes shared one name,
    and all methods were named ``lowerCAmelCase__`` so only the last survived.
    Attribute names follow the TokenizerTesterMixin convention.
    """

    tokenizer_class = MgpstrTokenizer        # class under test
    test_rust_tokenizer = False              # no fast tokenizer for MGP-STR
    from_pretrained_kwargs = {}              # no extra kwargs needed
    test_seq2seq = False                     # single-sequence tokenizer

    def lowerCAmelCase__(self):  # kept name for external callers; acts as setUp
        '''simple docstring'''
        self.setUp()

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # MGP-STR round-trips plain lower-case text unchanged.
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                # decode() may insert spaces between characters; strip them.
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 54
|
def a__(profit, weight, max_weight):
    '''
    Fractional knapsack: return the maximum profit obtainable by greedily
    taking items (or fractions of them) in decreasing profit/weight order
    until ``max_weight`` is filled.

    The original signature repeated one parameter name three times (a
    SyntaxError) and every local shared a single name; identifiers are
    restored from each value's use and the validation messages.

    :param profit: profit of each item (non-negative numbers).
    :param weight: weight of each item, parallel to ``profit``.
    :param max_weight: capacity of the knapsack (must be > 0).
    :raises ValueError: on mismatched lengths, non-positive capacity, or
        negative profits/weights.
    '''
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Profit gained for 1kg of each item respectively (profit/weight ratio).
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Sorted copy so the best remaining ratio can be picked from the end.
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0  # total weight taken so far
    gain = 0  # total profit accumulated
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # sentinel so .index() skips used items

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    print(
        """Input profits, weights, and then max_weight (all positive ints) separated by """
        """spaces."""
    )

    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))

    # Function Call — the solver above is named `a__`; the original called the
    # undefined name `calc_profit`, which raised NameError.
    a__(profit, weight, max_weight)
| 54
| 1
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = MODEL_FOR_CAUSAL_LM_MAPPING
lowercase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def snake_case__( self ) -> Optional[int]:
_a : Union[str, Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_a : Tuple = text_generator('''This is a test''' , do_sample=lowercase )
self.assertEqual(
lowercase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_a : Tuple = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
lowercase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_a : Optional[int] = text_generator('''This is a test''' , do_sample=lowercase , num_return_sequences=2 , return_tensors=lowercase )
self.assertEqual(
lowercase , [
{'''generated_token_ids''': ANY(lowercase )},
{'''generated_token_ids''': ANY(lowercase )},
] , )
_a : Optional[Any] = text_generator.model.config.eos_token_id
_a : Optional[int] = '''<pad>'''
_a : Dict = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase , )
self.assertEqual(
lowercase , [
[
{'''generated_token_ids''': ANY(lowercase )},
{'''generated_token_ids''': ANY(lowercase )},
],
[
{'''generated_token_ids''': ANY(lowercase )},
{'''generated_token_ids''': ANY(lowercase )},
],
] , )
@require_tf
def snake_case__( self ) -> Optional[Any]:
_a : Dict = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_a : Any = text_generator('''This is a test''' , do_sample=lowercase )
self.assertEqual(
lowercase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_a : Union[str, Any] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=lowercase )
self.assertEqual(
lowercase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def snake_case__( self , lowercase , lowercase , lowercase ) -> Any:
_a : int = TextGenerationPipeline(model=lowercase , tokenizer=lowercase )
return text_generator, ["This is a test", "Another test"]
def snake_case__( self ) -> Tuple:
_a : Optional[int] = '''Hello I believe in'''
_a : Any = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_a : Union[str, Any] = text_generator(lowercase )
self.assertEqual(
lowercase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_a : Any = text_generator(lowercase , stop_sequence=''' fe''' )
self.assertEqual(lowercase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def snake_case__( self , lowercase , lowercase ) -> List[str]:
_a : Dict = text_generator.model
_a : Optional[Any] = text_generator.tokenizer
_a : Dict = text_generator('''This is a test''' )
self.assertEqual(lowercase , [{'''generated_text''': ANY(lowercase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_a : str = text_generator('''This is a test''' , return_full_text=lowercase )
self.assertEqual(lowercase , [{'''generated_text''': ANY(lowercase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_a : Optional[int] = pipeline(task='''text-generation''' , model=lowercase , tokenizer=lowercase , return_full_text=lowercase )
_a : List[str] = text_generator('''This is a test''' )
self.assertEqual(lowercase , [{'''generated_text''': ANY(lowercase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_a : List[str] = text_generator('''This is a test''' , return_full_text=lowercase )
self.assertEqual(lowercase , [{'''generated_text''': ANY(lowercase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_a : Optional[Any] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=lowercase )
self.assertEqual(
lowercase , [
[{'''generated_text''': ANY(lowercase )}, {'''generated_text''': ANY(lowercase )}],
[{'''generated_text''': ANY(lowercase )}, {'''generated_text''': ANY(lowercase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_a : Dict = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase )
self.assertEqual(
lowercase , [
[{'''generated_text''': ANY(lowercase )}, {'''generated_text''': ANY(lowercase )}],
[{'''generated_text''': ANY(lowercase )}, {'''generated_text''': ANY(lowercase )}],
] , )
with self.assertRaises(lowercase ):
_a : int = text_generator('''test''' , return_full_text=lowercase , return_text=lowercase )
with self.assertRaises(lowercase ):
_a : Union[str, Any] = text_generator('''test''' , return_full_text=lowercase , return_tensors=lowercase )
with self.assertRaises(lowercase ):
_a : List[Any] = text_generator('''test''' , return_text=lowercase , return_tensors=lowercase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_a : str = text_generator('''''' )
self.assertEqual(lowercase , [{'''generated_text''': ANY(lowercase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_a : Optional[Any] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_a : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_a : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowercase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case__( self ) -> List[str]:
import torch
# Classic `model_kwargs`
_a : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_a : int = pipe('''This is a test''' )
self.assertEqual(
lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
_a : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_a : int = pipe('''This is a test''' )
self.assertEqual(
lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_a : Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_a : List[str] = pipe('''This is a test''' )
self.assertEqual(
lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def snake_case__( self ) -> List[Any]:
import torch
_a : Optional[int] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case__( self ) -> List[Any]:
import torch
_a : List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=lowercase , top_p=0.5 )
def snake_case__( self ) -> Tuple:
_a : Tuple = '''Hello world'''
_a : Dict = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_a : List[str] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_a : Optional[Any] = logging.get_logger('''transformers.generation.utils''' )
_a : List[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowercase ) as cl:
_a : str = text_generator(lowercase , max_length=10 , max_new_tokens=1 )
self.assertIn(lowercase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(lowercase ) as cl:
_a : Optional[Any] = text_generator(lowercase , max_new_tokens=1 )
self.assertNotIn(lowercase , cl.out )
with CaptureLogger(lowercase ) as cl:
_a : List[str] = text_generator(lowercase , max_length=10 )
self.assertNotIn(lowercase , cl.out )
| 307
|
def mf_knapsack(i: int, wt: list, val: list, j: int) -> int:
    """Memoized 0/1 knapsack: best value using the first ``i`` items with capacity ``j``.

    Results are cached in the module-level table ``f`` (``f[i][j] < 0`` marks an
    uncomputed cell). The mangled original computed the value but never stored
    it back into ``f``, defeating the memoization and returning stale -1 cells.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # Item i does not fit: value is whatever the first i-1 items achieve.
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            # Either skip item i, or take it and add its value.
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int):
    """Bottom-up 0/1 knapsack.

    :param w: total capacity
    :param wt: item weights
    :param val: item values
    :param n: number of items
    :return: ``(best_value, dp_table)`` where ``dp_table[i][c]`` is the best
        value using the first ``i`` items with capacity ``c``.

    The mangled original never wrote into the DP table (assignments went to a
    throwaway name) and returned ``dp[n][w_]`` using a leaked loop variable,
    which raised ``NameError`` for ``w == 0``.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                # Take item i if it improves on skipping it.
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack and also reconstruct one optimal item subset.

    :param w: total capacity
    :param wt: item weights (must be integers)
    :param val: item values
    :return: ``(optimal_value, example_optimal_set)`` where the set contains
        1-based item indices of one optimal selection.
    :raises ValueError: if ``wt``/``val`` are not sequences or differ in length
    :raises TypeError: if any weight is not an integer

    Restores the sibling-function calls (``knapsack``, ``_construct_solution``)
    and local bindings that the mangled original dropped.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(UpperCAmelCase , UpperCAmelCase , i - 1 , UpperCAmelCase , UpperCAmelCase )
else:
optimal_set.add(UpperCAmelCase )
_construct_solution(UpperCAmelCase , UpperCAmelCase , i - 1 , j - wt[i - 1] , UpperCAmelCase )
if __name__ == "__main__":
    # Small worked example: 4 items, capacity 6.
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    # Memo table for mf_knapsack: row 0 is all zeros, remaining cells start at -1.
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 307
| 1
|
from __future__ import annotations

# Coulomb's constant k_e (units = N * m^2 * C^-2).
COULOMBS_CONSTANT = 8.988e9


def couloumbs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    """Apply Coulomb's law, solving for whichever single argument is 0.

    Exactly one of ``force``, ``charge1``, ``charge2``, ``distance`` must be 0;
    its value is computed from the other three and returned in a one-entry dict
    keyed by that argument's name.

    :raises ValueError: if not exactly one argument is 0, or distance < 0.

    The mangled original declared four parameters with the same name (a
    SyntaxError) and bound the constant to a different name than it read.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 10
|
"""Lazy import structure for the ALBERT model family (PyTorch, TensorFlow, Flax).

Restores the canonical `_import_structure` population and the `sys.modules`
_LazyModule installation, both of which the mangled original assigned to
throwaway names (so the lazy module was built empty and never installed).
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# The configuration is always importable; backend-specific entries are added below.
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 372
| 0
|
import numpy as np
# 5x5 Polybius square ('j' is folded into 'i').
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over the classic 5x5 Polybius square.

    Restores the broken original, where `self.SQUARE` was never assigned, the
    row/column indices from `np.where` were never unpacked, and all three
    helper methods shared a single (shadowing) name.
    """

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based (row, column) coordinates in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message`: lowercase, strip spaces, fold 'j'->'i', then bifid-transpose."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Row indices in row 0, column indices in row 1.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Flatten row-major (all rows, then all columns) and re-pair consecutive digits.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a message produced by :meth:`encode`."""
        message = message.lower()
        message.replace(" ", "")

        # Lay the digit pairs out flat, then reshape into (rows, columns).
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
| 706
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    """Unit tests for the Vector and Matrix classes in `.lib`.

    Restores distinct method names: in the mangled original every test shared
    one name, so only the final definition existed and was ever run.
    """

    def test_component(self) -> None:
        # component() returns the indexed entry; an empty Vector is constructible.
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
    # Run the test-suite when this module is executed directly.
    unittest.main()
| 313
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` by the (modified) Newton-Raphson method.

    :param function: expression in ``variable``, parsed by sympy
    :param starting_point: initial guess (may be complex)
    :param variable: name of the symbol to solve for
    :param precision: stop once consecutive guesses differ by less than this
    :param multiplicity: root multiplicity; >1 accelerates convergence on repeated roots
    :raises ZeroDivisionError: if the derivative vanishes at a guess

    The mangled original bound every intermediate to a throwaway name, so
    ``func``/``diff_function``/``prev_guess``/``next_guess`` were all undefined.
    """
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Demonstrations: run the solver on a handful of classic root-finding problems.
if __name__ == "__main__":
    # Root of a trigonometric function: sin(x) = 0 near 2 converges to pi.
    print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
    # Root of a polynomial: fourth root of 5 (complex starting point).
    print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
    # log(y) = 1 converges to e (note the custom variable name).
    print(
        '''The root of log(y) - 1 = 0 is ''',
        F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
    )
    # Exponential root with a relaxed precision.
    print(
        '''The root of exp(x) - 1 = 0 is''',
        F"""{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}""",
    )
    # Root of cos(x) starting exactly at 0.
    print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 105
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
# Module logger (conventional transformers name; the mangled original bound it to `UpperCamelCase__`).
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    """Deprecated alias of :class:`ChineseCLIPImageProcessor`.

    Restores the `FutureWarning` category that the mangled original dropped
    (it passed a constructor argument where the warning category belongs).
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 105
| 1
|
# Deprecation shim: this script only re-exports the pipeline class from
# `diffusers` and warns users to import it directly instead.
import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401


warnings.warn(
    '''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
    ''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 717
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Convert a fine-pruned checkpoint into a standalone ("bertarized") dense checkpoint.

    Applies the selected binarizer to every prunable tensor, copies the rest
    unchanged, and saves the result next to (or at) ``args.target_model_path``.

    The mangled original bound every local to one throwaway name, so `mask`,
    `prefix_` and the output dict were read while undefined; it also had to be
    named `main` to match the call in the `__main__` block.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Never pruned: copy verbatim.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip the trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # NOTE(review): third argument is `sigmoid=True` in the upstream script — confirm.
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval from the L0 regularization paper.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    # Build the CLI, parse, and hand off to main(). The mangled original
    # bound the parser to a throwaway name while reading `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
| 72
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make RNG-dependent ops deterministic so the expected audio slices below are reproducible.
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for DanceDiffusionPipeline.

    Restores distinct method/attribute names (the mangled original reused one
    name for every method and attribute, and declared `get_dummy_inputs` with
    two parameters of the same name — a SyntaxError).
    """

    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # NOTE(review): the two boolean flags were both mangled to the same name;
    # mapped to the upstream pair below — confirm against the diffusers repo.
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS has no per-device Generator; fall back to the global seed there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real `harmonai/maestro-150k` weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        # fp16 variant of the same check (the mangled original used the
        # non-existent dtype `torch.floataa`).
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 670
|
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module logger (conventional name; the mangled original bound it to `__SCREAMING_SNAKE_CASE`).
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Names of the supported learning-rate schedules (values are CLI/config strings)."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer, last_epoch=-1):
    """Return a schedule with a constant learning rate (multiplier 1 at every step)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step):
        # Ramp linearly from 0 to 1 during warmup, then hold at 1.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    """Piecewise-constant multiplier schedule.

    ``step_rules`` looks like ``"1:10,20:0.5,0.1"``: multiplier 10 until step 1,
    0.5 until step 20, then 0.1 forever. The mangled original never populated
    ``rules_dict`` and read an unbound ``sorted_steps``.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    # All entries but the last are "boundary:value"; the last is the final multiplier.
    for rule_str in rule_list[:-1]:
        step_str, value_str = rule_str.split(":")
        steps = int(step_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base LR, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay linearly; clamp at 0 once past num_training_steps.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup followed by a cosine decay; `num_cycles=0.5` gives one half-wave to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    """Linear warmup, then `num_cycles` cosine waves each restarting ("hard restart") at the base LR."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts each cycle at multiplier 1.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Create a schedule with linear warmup and polynomial decay down to ``lr_end``.

    After warmup the learning rate decays from the optimizer's initial lr to
    ``lr_end`` following ``(1 - progress) ** power``; past ``num_training_steps``
    it stays at ``lr_end``.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer`` (its default ``lr`` is
            used as the initial learning rate).
        num_warmup_steps (int): number of warmup steps.
        num_training_steps (int): total number of training steps.
        lr_end (float): final learning rate (default 1e-7).
        power (float): polynomial power (default 1.0, i.e. linear decay).
        last_epoch (int): index of the last epoch when resuming (default -1).

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR`` applying the schedule.

    Raises:
        ValueError: if ``lr_end`` is not smaller than the optimizer's
            initial learning rate.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table for the scheduler factory below: maps each SchedulerType
# member to the function that builds that schedule.
# NOTE(review): the factory names referenced here (e.g.
# `get_linear_schedule_with_warmup`) and `SchedulerType` itself do not appear
# as definitions in this file as shown — presumably imported or defined
# elsewhere; confirm, since the functions in this file are all named
# `snake_case`.
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def snake_case(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Unified factory: build the scheduler identified by ``name``.

    Args:
        name (str or SchedulerType): which schedule to build.
        optimizer: wrapped ``torch.optim.Optimizer``.
        step_rules (str, optional): rule string, only used by PIECEWISE_CONSTANT.
        num_warmup_steps (int, optional): required by every warmup schedule.
        num_training_steps (int, optional): required by every decaying schedule.
        num_cycles (int): number of hard restarts for COSINE_WITH_RESTARTS.
        power (float): polynomial power for POLYNOMIAL.
        last_epoch (int): index of the last epoch when resuming (default -1).

    Returns:
        the constructed learning-rate scheduler.

    Raises:
        ValueError: if a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    # Schedules that need no step counts at all.
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 670
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import scaffolding: declare what each submodule exports, then only
# import the heavy backend modules (torch/tf/sentencepiece) when they are
# actually available and requested.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves them on first attribute access instead.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy: submodules listed in
    # `_import_structure` are imported only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = 0
if start < end:
_snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Any = a[end]
_snake_case : List[str] = a[pivot]
_snake_case : Optional[int] = temp
_snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 )
count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ )
return count
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = 0
_snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Tuple = a[end]
_snake_case : Optional[Any] = a[pivot]
_snake_case : Union[str, Any] = temp
_snake_case : Union[str, Any] = start - 1
for index in range(lowerCAmelCase_ , lowerCAmelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_snake_case : Optional[int] = new_pivot_index + 1
_snake_case : Optional[Any] = a[new_pivot_index]
_snake_case : Tuple = a[index]
_snake_case : str = temp
_snake_case : Any = a[new_pivot_index + 1]
_snake_case : str = a[end]
_snake_case : Optional[int] = temp
return new_pivot_index + 1, count
# --- Demo driver -------------------------------------------------------
# Draws 100 samples from a standard normal distribution, round-trips them
# through a temporary .npy file, sorts them with the randomized in-place
# quicksort defined above, and reports the number of comparisons performed.
# NOTE(review): every assignment below binds the name `UpperCAmelCase`,
# yet later statements read `outfile`, `mu`, `sigma`, `p`, `X`, `M`, `r`
# and `z`, and call `_in_place_quick_sort` — none of which are defined in
# this file as shown. The names look machine-mangled; as written this
# script raises NameError and needs its original names restored.
UpperCAmelCase : Dict = TemporaryFile()
UpperCAmelCase : Dict = 1_0_0 # 100 elements are to be sorted
UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation
UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # rewind the temp file so the saved array can be read back
UpperCAmelCase : int = np.load(outfile)
UpperCAmelCase : Optional[int] = len(M) - 1
UpperCAmelCase : str = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    'is :'
)
print(z)
| 47
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.