code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __A :
    """Test helper that builds a toy ``EsmConfig`` plus dummy inputs for the TF ESM tests.

    NOTE(review): this file looks machine-obfuscated. Every assignment target has
    been replaced by the throwaway local ``lowerCamelCase__``, so the ``self.*``
    attributes the methods read (``self.batch_size``, ``self.vocab_size``, ...)
    are never actually set; all methods share the duplicated name
    ``__lowerCamelCase`` (later definitions shadow earlier ones); and two of the
    signatures below repeat the parameter name ``__lowerCAmelCase``, which is a
    SyntaxError. The original identifiers must be restored before this can run.
    """

    def __init__( self , __lowerCAmelCase , ):
        # NOTE(review): presumably ``self.parent = parent``, ``self.batch_size = 13``,
        # ``self.seq_length = 7``, ... before obfuscation; code kept byte-identical.
        lowerCamelCase__ = parent
        lowerCamelCase__ = 1_3
        lowerCamelCase__ = 7
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        lowerCamelCase__ = 9_9
        lowerCamelCase__ = 3_2
        lowerCamelCase__ = 2
        lowerCamelCase__ = 4
        lowerCamelCase__ = 3_7
        lowerCamelCase__ = '''gelu'''
        lowerCamelCase__ = 0.1
        lowerCamelCase__ = 0.1
        lowerCamelCase__ = 5_1_2
        lowerCamelCase__ = 1_6
        lowerCamelCase__ = 2
        lowerCamelCase__ = 0.02
        lowerCamelCase__ = 3
        lowerCamelCase__ = 4
        lowerCamelCase__ = None

    def __lowerCamelCase ( self ):
        """Build ``(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)``."""
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ = None
        if self.use_input_mask:
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowerCamelCase ( self ):
        """Decoder variant: same inputs plus encoder_hidden_states / encoder_attention_mask."""
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = self.prepare_config_and_inputs()
        lowerCamelCase__ = True
        lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    # NOTE(review): signature repeats ``__lowerCAmelCase`` six times — SyntaxError.
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Check TFEsmModel output shape via dict, list and plain-tensor call styles."""
        lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = [input_ids, input_mask]
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # NOTE(review): signature repeats ``__lowerCAmelCase`` eight times — SyntaxError.
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
        """Check TFEsmModel as a decoder with cross-attention inputs."""
        lowerCamelCase__ = True
        lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
        lowerCamelCase__ = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = [input_ids, input_mask]
        lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase )
        # Also check the case where encoder outputs are not passed
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Check TFEsmForMaskedLM logits shape: (batch, seq, vocab)."""
        lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase )
        lowerCamelCase__ = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Check TFEsmForTokenClassification logits shape: (batch, seq, num_labels)."""
        lowerCamelCase__ = self.num_labels
        lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __lowerCamelCase ( self ):
        """Reduce prepare_config_and_inputs() to ``(config, inputs_dict)`` for the common tests."""
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = config_and_inputs
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """TF ESM model test suite (common model tests + pipeline tests).

    NOTE(review): obfuscation damage — the two mixin base classes are both named
    ``lowerCAmelCase`` (presumably TFModelTesterMixin and PipelineTesterMixin,
    both imported above; verify); the four class attributes below all share the
    name ``lowerCAmelCase_`` so only the last assignment survives; every test
    method is named ``__lowerCamelCase`` so later definitions shadow earlier
    ones; and ``TFEsmModelTester`` / ``__lowerCAmelCase`` are undefined names.
    """

    # Presumably ``all_model_classes`` before obfuscation.
    lowerCAmelCase_ = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Presumably ``pipeline_model_mapping``.
    lowerCAmelCase_ = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def __lowerCamelCase ( self ):
        """Set up the model tester and config tester."""
        lowerCamelCase__ = TFEsmModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def __lowerCamelCase ( self ):
        """Run the shared EsmConfig sanity checks."""
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        """Exercise the base model shape check."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Exercise the decoder (cross-attention) variant."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Exercise the masked-LM head."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Exercise the token-classification head."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )

    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test from_pretrained for the first published checkpoint."""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )

    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowerCamelCase ( self ):
        """Skipped: embedding resizing unsupported for ESM."""
        pass

    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowerCamelCase ( self ):
        """Skipped: embedding resizing unsupported for ESM."""
        pass

    def __lowerCamelCase ( self ):
        """Check input/output embedding accessors; the MLM head exposes biases as a dict of tf.Variable."""
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(__lowerCAmelCase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                lowerCamelCase__ = model.get_bias()
                assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
                for k, v in name.items():
                    assert isinstance(__lowerCAmelCase , tf.Variable )
            else:
                lowerCamelCase__ = model.get_output_embeddings()
                assert x is None
                lowerCamelCase__ = model.get_bias()
                assert name is None
@require_tf
class __A ( unittest.TestCase ):
    """Slow integration tests comparing TF ESM outputs against recorded reference slices.

    NOTE(review): obfuscated — locals are all ``lowerCamelCase__`` and the
    ``model(__lowerCAmelCase)`` calls reference an undefined name; restore the
    original variable names before running.
    """

    @slow
    def __lowerCamelCase ( self ):
        """Masked-LM head: check logits shape (1, 6, 33) and a 3x3 slice to 1e-2."""
        lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        lowerCamelCase__ = [1, 6, 3_3]
        self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase )
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )

    @slow
    def __lowerCamelCase ( self ):
        """Base model: check a 3x3 slice of the last hidden state to 1e-4."""
        lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCAmelCase__(__snake_case ):
    """Timing decorator: the wrapped callable runs ``__snake_case`` and returns the
    elapsed wall-clock time in seconds (the decorated function's own return
    value is deliberately discarded — this is a benchmark helper).

    Fixes vs. original: the obfuscated body referenced an undefined ``starttime``
    (NameError) and assigned ``func.__name__`` to a throwaway local instead of
    propagating it onto the wrapper.
    """
    def wrapper(*args ,**kwargs ):
        starttime = timeit.default_timer()
        __snake_case(*args ,**kwargs )  # result intentionally dropped; we only time it
        return timeit.default_timer() - starttime

    # Preserve the benchmarked function's name for reporting.
    wrapper.__name__ = __snake_case.__name__
    return wrapper
def lowerCAmelCase__(features ,num_examples=100 ,seq_shapes=None ) -> list:
    """Generate ``num_examples`` dummy ``(index, example_dict)`` pairs matching *features*.

    Args:
        features: a ``datasets.Features``-like mapping of column name -> feature type.
        num_examples: how many examples to synthesize.
        seq_shapes: mapping column name -> shape, required for ``datasets.Sequence`` columns.

    Fixes vs. original: the obfuscated signature repeated ``__snake_case`` three
    times (a SyntaxError) and the body assigned every value to a throwaway
    local, leaving ``dummy_data``/``data`` undefined. Keyword names follow the
    call site below (``num_examples=``, ``seq_shapes=``).
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v ,_ArrayXD ):
                # Fixed-shape array column: random floats cast to the declared dtype.
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v ,datasets.Value ):
                if v.dtype == "string":
                    data = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
            elif isinstance(v ,datasets.Sequence ):
                # Unwrap nested Sequence wrappers to reach the innermost feature dtype.
                while isinstance(v ,datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def lowerCAmelCase__(features ,path ,num_examples=100 ,seq_shapes=None ):
    """Write *num_examples* generated dummy examples to an Arrow file and reload them as a Dataset.

    Args:
        features: ``datasets.Features`` describing the columns.
        path: destination file path for the Arrow data.
        num_examples: number of examples to generate and write.
        seq_shapes: forwarded to the example generator for Sequence columns.

    Raises:
        ValueError: if the writer reports a different example count than requested.

    Fixes vs. original: the obfuscated signature repeated ``__snake_case`` four
    times (a SyntaxError) and the body left ``dummy_data``/``num_final_examples``
    undefined. NOTE(review): ``generate_examples`` is assumed to be the
    example-generator defined just above (its name was also mangled).
    """
    dummy_data = generate_examples(features ,num_examples=num_examples ,seq_shapes=seq_shapes )
    with ArrowWriter(features=features ,path=path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        # finalize() returns (num_examples_written, num_bytes).
        num_final_examples , num_bytes = writer.finalize()
    if num_final_examples != num_examples:
        raise ValueError(
            F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=path ,info=datasets.DatasetInfo(features=features ) )
    return dataset
| 29
| 1
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Any:
    """Generate and write a README.md model card for one allenai WMT16 FSMT model.

    NOTE(review): obfuscation left the parameter name ``__snake_case`` repeated
    four times (a SyntaxError) while the body reads ``src_lang`` / ``tgt_lang``
    / ``model_name`` / ``model_card_dir`` — restore the signature
    ``(model_card_dir, src_lang, tgt_lang, model_name)`` (see the call site at
    the bottom of the file) before running. Locals are likewise assigned to a
    throwaway name, leaving ``texts``/``scores``/``pair``/``readme``/``path``
    unbound.
    """
    # Sample sentences embedded in the generated usage snippet.
    lowerCamelCase__ = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    lowerCamelCase__ = {
        '''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
        '''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
        '''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
    }
    lowerCamelCase__ = F'{src_lang}-{tgt_lang}'
    # Full model-card markdown body as a single f-string template.
    lowerCamelCase__ = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. 
`transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=__snake_case ,exist_ok=__snake_case )
    lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
    print(F'Generating {path}' )
    with open(__snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
        f.write(__snake_case )
# Script entry: generate a model card for each allenai WMT16 checkpoint.
# NOTE(review): obfuscation renamed every assignment target to ``_a``, so
# ``repo_dir`` / ``model_cards_dir`` / ``model_card_dir`` (and the function
# name ``write_model_card``) are undefined — restore the original names.
# make sure we are under the root of the project
_a = Path(__file__).resolve().parent.parent.parent
_a = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    _a = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 29
|
def lowerCAmelCase__(grid ) -> int:
    """Return the minimum path sum from top-left to bottom-right of *grid*,
    moving only right or down. The grid is modified in place (each cell becomes
    the minimal cost of reaching it).

    Args:
        grid: non-empty rectangular list of lists of numbers.

    Raises:
        TypeError: if *grid* is empty or its first row is empty (matches the
            original's error type and message).

    Fixes vs. original: the obfuscated body never bound ``row_above`` and called
    the (also-mangled) row-filling helper with undefined names; the helper is
    restored here as a local function so this block is self-contained.
    """
    def _fill_row(current_row ,row_above ):
        # Accumulate minimal costs into current_row using the row above.
        current_row[0] += row_above[0]
        for cell_n in range(1 ,len(current_row ) ):
            current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
        return current_row

    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    # First row can only be reached by moving right: prefix sums.
    for cell_n in range(1 ,len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 ,len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = _fill_row(current_row ,row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def lowerCAmelCase__(current_row ,row_above ) -> list:
    """Fold *row_above*'s minimal path costs into *current_row* (in place) and return it.

    After the call, ``current_row[i]`` is the minimal cost of reaching cell *i*
    given the already-finalized costs in ``row_above``.

    Fixes vs. original: the obfuscated signature repeated ``__snake_case`` for
    both parameters — a SyntaxError — while the body already used the names
    ``current_row`` and ``row_above``; the signature is restored to match.
    """
    # Leftmost cell can only be reached from directly above.
    current_row[0] += row_above[0]
    for cell_n in range(1 ,len(current_row ) ):
        # Best of: coming from the left, or from above.
        current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
    return current_row
# Run any doctests in this module when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 29
| 1
|
def lowerCAmelCase__(base ,exponent ) -> float:
    """Return ``base`` raised to a non-negative integer ``exponent`` by recursion.

    Returns 1 when ``exponent`` is 0 (including 0**0, by convention here).
    Negative exponents are NOT handled — the caller below compensates with
    ``1 / result``.

    Fixes vs. original: the obfuscated signature repeated ``__snake_case`` for
    both parameters (a SyntaxError) and the body recursed via the undefined
    name ``power``; restored to self-recursion with the names the body used.
    """
    return base * lowerCAmelCase__(base ,exponent - 1 ) if exponent else 1
# Interactive driver: read base and exponent, print base**exponent.
# NOTE(review): obfuscation renamed every assignment target to ``_a``, so
# ``base`` / ``exponent`` / ``result`` (and the function name ``power``) are
# undefined here — restore the original names before running.
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    _a = int(input("Enter the base: ").strip())
    _a = int(input("Enter the exponent: ").strip())
    _a = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        _a = 1 / result
    print(f"""{base} to the power of {exponent} is {result}""")
| 29
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
    """Data-training arguments for the (deprecated) GLUE dataset helper.

    NOTE(review): obfuscation renamed every field to ``lowerCAmelCase_`` — since
    they share one name, only the last field survives; the originals were
    presumably ``task_name``, ``data_dir``, ``max_seq_length``,
    ``overwrite_cache`` (the method below reads ``self.task_name``). Restore
    before use.
    """

    lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    lowerCAmelCase_ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    lowerCAmelCase_ = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )

    def __lowerCamelCase ( self ):
        """Presumably ``__post_init__``: normalize the task name to lowercase
        (the result is discarded here due to the mangled assignment target)."""
        lowerCamelCase__ = self.task_name.lower()
class __A ( lowerCAmelCase ):
    """Dataset split names — train / dev / test (presumably an ``Enum``; the
    base class name was mangled to ``lowerCAmelCase``).

    NOTE(review): all three members share the mangled name ``lowerCAmelCase_``,
    so only the last assignment survives; originals were presumably
    ``train`` / ``dev`` / ``test`` (``Split.train`` is referenced below).
    """
    lowerCAmelCase_ = """train"""
    lowerCAmelCase_ = """dev"""
    lowerCAmelCase_ = """test"""
class __A ( lowerCAmelCase ):
    """Torch ``Dataset`` of GLUE ``InputFeatures``, cached on disk (deprecated in
    favor of 🤗 Datasets — see the warning emitted in ``__init__``).

    NOTE(review): heavily obfuscated — the ``__init__`` signature repeats
    ``__lowerCAmelCase`` (a SyntaxError), every assignment target is the
    throwaway local ``lowerCamelCase__`` (so ``self.args`` / ``self.features``
    / ``self.label_list`` etc. are never set), and the body reads names
    (``mode``, ``tokenizer``, ``limit_length``, ``cached_features_file``,
    ``label_list``, ``start``) that the mangled parameters no longer bind.
    """

    # NOTE(review): three annotated fields collapsed onto one mangled name;
    # only the last assignment survives.
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
        """Build features from the raw GLUE files or load them from the on-disk cache."""
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
        lowerCamelCase__ = args
        lowerCamelCase__ = glue_processors[args.task_name]()
        lowerCamelCase__ = glue_output_modes[args.task_name]
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            try:
                lowerCamelCase__ = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        # Load data features from cache or dataset file
        lowerCamelCase__ = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        lowerCamelCase__ = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
        lowerCamelCase__ = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCamelCase__ = cached_features_file + '''.lock'''
        with FileLock(__lowerCAmelCase ):
            if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
                lowerCamelCase__ = time.time()
                lowerCamelCase__ = torch.load(__lowerCAmelCase )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
                else:
                    lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    lowerCamelCase__ = examples[:limit_length]
                lowerCamelCase__ = glue_convert_examples_to_features(
                    __lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
                lowerCamelCase__ = time.time()
                torch.save(self.features , __lowerCAmelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )

    def __len__( self ):
        """Number of cached feature rows."""
        return len(self.features )

    def __getitem__( self , __lowerCAmelCase ):
        """Return the i-th ``InputFeatures`` (index name mangled to ``__lowerCAmelCase``)."""
        return self.features[i]

    def __lowerCamelCase ( self ):
        """Return the task's label list (as adjusted for RoBERTa-style tokenizers above)."""
        return self.label_list
| 1
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ = torch.exp(__snake_case )
lowerCamelCase__ = torch.sum(__snake_case ,dim=1 ) # sum of exp(x_i)
lowerCamelCase__ = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__snake_case ) - B / A
class __A ( nn.Module ):
    """DeeBERT encoder: a stack of ``BertLayer``s where each layer is paired with
    a ``BertHighway`` early-exit head. During inference, if an exit head's
    output entropy falls below its threshold, the forward pass is aborted by
    raising ``HighwayException`` carrying the exit outputs.

    NOTE(review): obfuscated — assignment targets are the throwaway local
    ``lowerCamelCase__``, so attributes such as ``self.layer`` /
    ``self.highway`` / ``self.early_exit_entropy`` read below are never set,
    and all methods share the name ``__lowerCamelCase``. Restore names before
    running.
    """

    def __init__( self , __lowerCAmelCase ):
        super().__init__()
        lowerCamelCase__ = config.output_attentions
        lowerCamelCase__ = config.output_hidden_states
        # One BertLayer and one BertHighway exit head per hidden layer.
        lowerCamelCase__ = nn.ModuleList([BertLayer(__lowerCAmelCase ) for _ in range(config.num_hidden_layers )] )
        lowerCamelCase__ = nn.ModuleList([BertHighway(__lowerCAmelCase ) for _ in range(config.num_hidden_layers )] )
        # -1 disables early exit (entropy can never be below -1).
        lowerCamelCase__ = [-1 for _ in range(config.num_hidden_layers )]

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Set the early-exit entropy threshold(s): a scalar applies to every layer,
        otherwise a per-layer list is expected."""
        if (type(__lowerCAmelCase ) is float) or (type(__lowerCAmelCase ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                lowerCamelCase__ = x
        else:
            lowerCamelCase__ = x

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Copy the main model pooler's weights into every highway exit's pooler."""
        lowerCamelCase__ = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ):
        """Forward pass over all layers, evaluating each highway exit along the way.

        At inference time, a sufficiently confident exit (entropy below the
        layer's threshold) raises ``HighwayException`` with the exit outputs.
        """
        lowerCamelCase__ = ()
        lowerCamelCase__ = ()
        lowerCamelCase__ = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                lowerCamelCase__ = all_hidden_states + (hidden_states,)
            lowerCamelCase__ = layer_module(
                __lowerCAmelCase , __lowerCAmelCase , head_mask[i] , __lowerCAmelCase , __lowerCAmelCase )
            lowerCamelCase__ = layer_outputs[0]
            if self.output_attentions:
                lowerCamelCase__ = all_attentions + (layer_outputs[1],)
            lowerCamelCase__ = (hidden_states,)
            if self.output_hidden_states:
                lowerCamelCase__ = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                lowerCamelCase__ = current_outputs + (all_attentions,)
            lowerCamelCase__ = self.highway[i](__lowerCAmelCase )
            # logits, pooled_output
            if not self.training:
                lowerCamelCase__ = highway_exit[0]
                lowerCamelCase__ = entropy(__lowerCAmelCase )
                lowerCamelCase__ = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                lowerCamelCase__ = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort the forward pass via exception.
                    lowerCamelCase__ = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(__lowerCAmelCase , i + 1 )
            else:
                lowerCamelCase__ = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            lowerCamelCase__ = all_hidden_states + (hidden_states,)
        lowerCamelCase__ = (hidden_states,)
        if self.output_hidden_states:
            lowerCamelCase__ = outputs + (all_hidden_states,)
        if self.output_attentions:
            lowerCamelCase__ = outputs + (all_attentions,)
        lowerCamelCase__ = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
    """DeeBERT base model: standard BERT embeddings + pooler around the
    early-exiting ``DeeBertEncoder``.

    NOTE(review): obfuscated — assignment targets are the throwaway local
    ``lowerCamelCase__`` (so ``self.embeddings`` / ``self.encoder`` /
    ``self.pooler`` are never bound), several methods share the name
    ``__lowerCamelCase``, and the forward body reads names
    (``input_ids``, ``attention_mask``, ...) the mangled parameters no longer
    bind. Restore before running.
    """

    def __init__( self , __lowerCAmelCase ):
        super().__init__(__lowerCAmelCase )
        lowerCamelCase__ = config
        lowerCamelCase__ = BertEmbeddings(__lowerCAmelCase )
        lowerCamelCase__ = DeeBertEncoder(__lowerCAmelCase )
        lowerCamelCase__ = BertPooler(__lowerCAmelCase )
        self.init_weights()

    def __lowerCamelCase ( self ):
        """Copy the main pooler's weights into each highway exit's pooler."""
        self.encoder.init_highway_pooler(self.pooler )

    def __lowerCamelCase ( self ):
        """Return the input (word) embedding layer."""
        return self.embeddings.word_embeddings

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Replace the input embedding layer (assignment target mangled)."""
        lowerCamelCase__ = value

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Prune attention heads: maps layer index -> iterable of head indices."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(__lowerCAmelCase )

    @add_start_docstrings_to_model_forward(__lowerCAmelCase )
    def __lowerCamelCase ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ):
        """Forward: embeddings -> early-exit encoder -> pooler.

        Returns ``(sequence_output, pooled_output, *encoder_extras)`` where the
        extras are hidden states / attentions / highway exits, when enabled.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
        elif input_ids is not None:
            lowerCamelCase__ = input_ids.size()
        elif inputs_embeds is not None:
            lowerCamelCase__ = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
        lowerCamelCase__ = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            lowerCamelCase__ = torch.ones(__lowerCAmelCase , device=__lowerCAmelCase )
        if encoder_attention_mask is None:
            lowerCamelCase__ = torch.ones(__lowerCAmelCase , device=__lowerCAmelCase )
        if token_type_ids is None:
            lowerCamelCase__ = torch.zeros(__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        lowerCamelCase__ = self.get_extended_attention_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            lowerCamelCase__ = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            lowerCamelCase__ = encoder_attention_mask[:, None, None, :]
        lowerCamelCase__ = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        lowerCamelCase__ = (1.0 - encoder_extended_attention_mask) * -1_0000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        lowerCamelCase__ = self.get_head_mask(__lowerCAmelCase , self.config.num_hidden_layers )
        lowerCamelCase__ = self.embeddings(
            input_ids=__lowerCAmelCase , position_ids=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , inputs_embeds=__lowerCAmelCase )
        lowerCamelCase__ = self.encoder(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , head_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
        lowerCamelCase__ = encoder_outputs[0]
        lowerCamelCase__ = self.pooler(__lowerCAmelCase )
        lowerCamelCase__ = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( lowerCAmelCase ):
    """Exception used to abort the encoder's forward pass when a highway
    (early-exit) head is confident enough.

    Attributes (read by the classifier's ``except HighwayException`` handler):
        message: the outputs tuple carried out of the encoder at the exit layer.
        exit_layer: 1-based index of the layer that exited.

    Fixes vs. original: the obfuscated ``__init__`` repeated the parameter name
    ``__lowerCAmelCase`` (a SyntaxError) and assigned both values to a
    throwaway local instead of instance attributes.
    """

    def __init__( self , message , exit_layer ):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class __A ( nn.Module ):
    """A single DeeBERT highway (early-exit) head: BertPooler -> dropout -> linear
    classifier over ``config.num_labels``.

    NOTE(review): obfuscated — assignment targets are the throwaway local
    ``lowerCamelCase__``, so ``self.pooler`` / ``self.dropout`` /
    ``self.classifier`` read in forward are never bound; restore before running.
    """

    def __init__( self , __lowerCAmelCase ):
        super().__init__()
        lowerCamelCase__ = BertPooler(__lowerCAmelCase )
        lowerCamelCase__ = nn.Dropout(config.hidden_dropout_prob )
        lowerCamelCase__ = nn.Linear(config.hidden_size , config.num_labels )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Forward: pool the first encoder output, classify, return (logits, pooled_output)."""
        lowerCamelCase__ = encoder_outputs[0]
        lowerCamelCase__ = self.pooler(__lowerCAmelCase )
        # "return" pooler_output
        # BertModel
        lowerCamelCase__ = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        lowerCamelCase__ = bmodel_output[1]
        lowerCamelCase__ = self.dropout(__lowerCAmelCase )
        lowerCamelCase__ = self.classifier(__lowerCAmelCase )
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    lowerCAmelCase,
)
class __A(lowerCAmelCase):
    """DeeBERT sequence classifier: a final classifier plus one highway
    (early-exit) classifier per encoder layer."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    # NOTE(review): the decorator argument constant was lost in a mangled
    # rename; it should be the model's inputs docstring — confirm upstream.
    @add_start_docstrings_to_model_forward(__lowerCAmelCase)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        """Run BERT with highway exits; returns ((loss,) logits, ...) plus, in
        eval mode, entropies and the exit layer index."""
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # A highway layer decided to exit early: its payload replaces the
            # full model output.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 29
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
# Module-level logger: the rest of this file reads the name `logger`
# (L807+), but it was bound to the throwaway name `_a`.
logger = datasets.logging.get_logger(__name__)
_a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Parse key (gold) and system CoNLL lines into the per-document structures
    CoVal's evaluator consumes.

    Returns a dict mapping ``doc`` to
    (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster).
    The original def was unrunnable: every parameter/local had been renamed to
    the same placeholder (duplicate-argument SyntaxError, undefined reads).
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    # Counters below are only used for the log messages at the end.
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        # Parse trees deliberately come from the *key* (gold) lines, as in upstream CoVal.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system output against the key with each (name, metric_fn) in `metrics`.

    Returns {"<name>/recall", "<name>/precision", "<name>/f1", ...} plus
    "conll_score" (the MUC/B³/CEAFe F1 average, scaled to 0-100) when all
    three of those metrics were evaluated. Name and parameters restored so the
    keyword call site (`evaluate(key_lines=..., ...)`) resolves again.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def lowerCAmelCase__(key_lines) -> bool:
    """Return True if any non-comment CoNLL line carries a gold parse bit
    (column 6 is not "-").

    Scanning stops at the first data line with fewer than 7 columns. The
    original body read `key_lines`, `parse_col` and `has_gold_parse` without
    ever assigning them (mangled renames) and so always raised NameError.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
# NOTE(review): _DESCRIPTION / _CITATION / _KWARGS_DESCRIPTION appear to have
# been mis-bound to the throwaway name `_a` above — confirm the constant names.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __A(datasets.Metric):
    """CoVal coreference metric (MUC, B³, CEAFe, LEA, mentions, CoNLL score)."""

    # datasets.Metric dispatches to _info/_compute; the mangled version defined
    # two methods both named __lowerCamelCase, so compute() could never work.
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Score `predictions` (system CoNLL lines) against `references` (key lines)."""
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 29
| 1
|
from ...processing_utils import ProcessorMixin
class __A(ProcessorMixin):
    """Bundles a SpeechT5 feature extractor and tokenizer into one processor."""

    # ProcessorMixin resolves these class names when loading from a checkpoint;
    # the mangled version assigned both strings to the same placeholder name.
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process inputs (`audio`/`text`) and/or targets (`audio_target`/`text_target`).

        Audio goes through the feature extractor, text through the tokenizer.
        When both an input and a target are given, the target's values/ids are
        attached to the inputs as `labels` (and its attention mask, if any, as
        `decoder_attention_mask`).
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_values` (feature extractor) or `input_ids` (tokenizer),
        plus optional `labels`, mirroring __call__'s labels handling."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap feature_size for num_mel_bins so spectrogram
                # labels are padded with the right width, then restore it.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
| 29
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
# Expose the builtin under its own name so patch_submodule() can patch it.
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 29
| 1
|
def lowerCAmelCase__(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer.

    >>> lowerCAmelCase__(28)
    28

    Raises ValueError for non-int or non-positive input. The original body
    called isinstance(x, x) — a TypeError — and read an undefined `input_num`.
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
# Module-level logger: the map helpers below call `logger.info`, but the
# logger had been bound to the throwaway name `_a`.
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    """Holds the joblib backend name set by `parallel_backend`.

    None (the default) means "use multiprocessing". Name restored: the
    functions below read `ParallelBackendConfig.backend_name`.
    """

    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over `iterable` in parallel.

    Uses a multiprocessing Pool unless a joblib backend was registered via
    `parallel_backend`, in which case joblib dispatches the work.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into contiguous slices and map them in a process Pool.

    Name restored from the call site in `parallel_map`; every local had been
    mangled to the same placeholder.
    """
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock with workers so progress bars don't interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")

    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` using the registered joblib backend.

    progress bar is not yet supported for _map_with_joblib, because tqdm
    couldn't accurately be applied across processes here.
    """
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager selecting a joblib backend for `parallel_map`.

    Registers the backend name globally on entry and always clears it on exit.
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 29
| 1
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_a = open # noqa: we just need to have a builtin inside this module to test it properly
| 29
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel.forward.

    `sample` holds the transformed hidden states, shaped like the forward
    input (batch x frames, channels, height, width). Name restored from the
    constructor call in forward(); base restored from the `BaseOutput` import.
    """

    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Transformer block applied along the *temporal* (frame) axis of video latents."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Attend across frames: input is (batch*frames, C, H, W); output has
        the same shape (residual connection), wrapped in
        TransformerTemporalModelOutput unless return_dict=False."""
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        # (B*F, C, H, W) -> (B*H*W, F, C): frames become the sequence axis.
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
| 29
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# Name restored: the class body below filters mappings against `_TO_SKIP`.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCAmelCase_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCAmelCase_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
lowerCamelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
lowerCamelCase__ = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
lowerCamelCase__ = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
lowerCamelCase__ = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
# Legacy behavior
lowerCamelCase__ = text_classifier('''This is great !''' , return_all_scores=__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
lowerCamelCase__ = text_classifier('''This is great !''' , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
lowerCamelCase__ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
lowerCamelCase__ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] , )
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
import torch
lowerCamelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
lowerCamelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
lowerCamelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = pipeline('''text-classification''' )
lowerCamelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowerCamelCase__ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowerCamelCase__ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
    @slow
    @require_tf
    def __lowerCamelCase ( self ):
        """Slow test: the default sentiment model labels positive/negative inputs as expected (TensorFlow)."""
        lowerCamelCase__ = pipeline('''text-classification''' , framework='''tf''' )
        lowerCamelCase__ = text_classifier('''This is great !''' )
        self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
        lowerCamelCase__ = text_classifier('''This is bad !''' )
        self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
        lowerCamelCase__ = text_classifier('''Birds are a type of animal''' )
        self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Build a TextClassificationPipeline plus example inputs for the shared pipeline test mixin."""
        lowerCamelCase__ = TextClassificationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Run the shared pipeline checks: single string, batch, top_k=None, text pairs, and the
        legacy nested-list text-pair format.

        NOTE(review): locals are collapsed to one obfuscated name, so names such
        as `text_classifier`, `outputs`, `valid_inputs` and `N` below are
        undefined as written -- TODO confirm the original bindings.
        """
        lowerCamelCase__ = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        lowerCamelCase__ = '''HuggingFace is in'''
        lowerCamelCase__ = text_classifier(__lowerCAmelCase )
        self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] )
        self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
        lowerCamelCase__ = ['''HuggingFace is in ''', '''Paris is in France''']
        lowerCamelCase__ = text_classifier(__lowerCAmelCase )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}, {'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] , )
        self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        lowerCamelCase__ = text_classifier(__lowerCAmelCase , top_k=__lowerCAmelCase )
        lowerCamelCase__ = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [[{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] * N, [{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] * N] , )
        lowerCamelCase__ = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
        lowerCamelCase__ = text_classifier(__lowerCAmelCase )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , {'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )} , )
        self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        lowerCamelCase__ = [['''HuggingFace is in ''', '''Paris is in France''']]
        with self.assertRaises(__lowerCAmelCase ):
            text_classifier(__lowerCAmelCase )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        lowerCamelCase__ = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [{'''label''': ANY(__lowerCAmelCase ), '''score''': ANY(__lowerCAmelCase )}] , )
        self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
| 29
|
# Korean-language notebook bootstrap cell: installs transformers + datasets
# (the commented command installs from source instead of the last release).
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# NOTE(review): all three bindings below collapse to the same obfuscated name
# `_a`, and `INSTALL_CONTENT` is undefined under that name -- TODO confirm the
# original names (install cell list, doc placeholder substitutions).
_a = [{"type": "code", "content": INSTALL_CONTENT}]
_a = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 29
| 1
|
import random
def lowerCAmelCase__(__snake_case ) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses.

    Returns False when ``__snake_case`` is definitely composite and True when
    it is prime (or a very-probable prime for large inputs).
    """
    # Guard small/even inputs explicitly: the witness loop below needs an odd
    # candidate > 3, otherwise t == 0 and the `i == t - 1` exit is unreachable
    # (the original spun forever on even numbers and crashed on num < 4).
    if __snake_case < 2:
        return False
    if __snake_case < 4:
        return True  # 2 and 3 are prime
    if __snake_case % 2 == 0:
        return False
    # Decompose num - 1 as s * 2**t with s odd.
    s = __snake_case - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        # Random witness in [2, num - 2].
        a = random.randrange(2 , __snake_case - 1 )
        v = pow(a , s , __snake_case )
        if v != 1:
            i = 0
            # Square repeatedly; a prime must reach num - 1 before t squarings.
            while v != (__snake_case - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % __snake_case
    return True
def lowerCAmelCase__(__snake_case ) -> bool:
    """Return True if ``__snake_case`` is prime.

    Fast path: membership in / divisibility by the primes below 1000; larger
    candidates fall through to the Miller-Rabin test.
    """
    # Fixed: the original body read an undefined name `num` instead of the
    # function's parameter.
    if __snake_case < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if __snake_case in low_primes:
        return True
    for prime in low_primes:
        if (__snake_case % prime) == 0:
            return False
    # NOTE(review): `rabin_miller` is not defined under that name in this
    # obfuscated module -- TODO confirm it resolves at runtime.
    return rabin_miller(__snake_case )
def lowerCAmelCase__(__snake_case = 1024 ) -> int:
    """Generate a random probable prime with ``__snake_case`` bits (default 1024)."""
    while True:
        # Sample uniformly from [2**(keysize-1), 2**keysize) so the candidate
        # has exactly `keysize` bits.  (Fixed: the original collapsed the
        # local to an obfuscated name and then tested the *keysize* parameter
        # for primality and returned an undefined `num`.)
        num = random.randrange(2 ** (__snake_case - 1) , 2 ** (__snake_case) )
        # NOTE(review): `is_prime_low_num` is not defined under that name in
        # this obfuscated module -- TODO confirm it resolves at runtime.
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    # NOTE(review): `generate_large_prime` is the pre-obfuscation name of the
    # function above -- TODO confirm.  Fixed the prints to use the bound name.
    _a = generate_large_prime()
    print(("Prime number:", _a))
    print(("is_prime_low_num:", is_prime_low_num(_a)))
| 29
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import table for the GPT-Neo model family.
# NOTE(review): every binding below collapses to the obfuscated name `_a`,
# and `_import_structure` passed to `_LazyModule` at the bottom is undefined
# as written -- TODO confirm the original names.
_a = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
# PyTorch symbols: only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
# Flax symbols: only exported when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
# Eager imports for static type checkers; at runtime a _LazyModule is
# installed instead so heavy deps load on first attribute access.
if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 1
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __A ( unittest.TestCase ):
    """Integration test: launch the bundled accelerate test script on TPU via xla_spawn."""

    def __lowerCamelCase ( self ):
        """Resolve paths to accelerate's test script and this test file's directory.

        NOTE(review): locals are collapsed to one obfuscated name, so
        `mod_file` below (and the `self.test_dir` / `self.test_file_path`
        attributes used later) are undefined as written -- TODO confirm.
        """
        lowerCamelCase__ = inspect.getfile(accelerate.test_utils )
        lowerCamelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        lowerCamelCase__ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def __lowerCamelCase ( self ):
        """Spawn the test script across 8 TPU cores and fail on a non-zero exit."""
        lowerCamelCase__ = F'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
        lowerCamelCase__ = [sys.executable] + distributed_args
        execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
| 29
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 29
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __A ( unittest.TestCase ):
    """Tests for AlignProcessor: save/load round-trips and tokenizer/image-processor delegation.

    NOTE(review): throughout this class locals are collapsed to one obfuscated
    name (`lowerCamelCase__`), so names such as `vocab_tokens`,
    `image_processor_map`, `tokenizer_slow`, `processor`, `inputs`, etc. that
    the assertions read are undefined as written -- TODO confirm the original
    bindings.
    """

    def __lowerCamelCase ( self ):
        """Create a temp dir holding a toy BERT vocab and an EfficientNet image-processor config."""
        lowerCamelCase__ = tempfile.mkdtemp()
        lowerCamelCase__ = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        lowerCamelCase__ = {
            '''do_resize''': True,
            '''size''': 2_0,
            '''do_center_crop''': True,
            '''crop_size''': 1_8,
            '''do_normalize''': True,
            '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
            '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        lowerCamelCase__ = os.path.join(self.tmpdirname , __lowerCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowerCAmelCase , __lowerCAmelCase )
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        """Return a slow BertTokenizer loaded from the temp dir."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        """Return a fast BertTokenizer loaded from the temp dir."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        """Return an EfficientNetImageProcessor loaded from the temp dir."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Remove the temp dir created in setup."""
        shutil.rmtree(self.tmpdirname )
    def __lowerCamelCase ( self ):
        """Return a single random RGB PIL image as the test fixture."""
        lowerCamelCase__ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        lowerCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def __lowerCamelCase ( self ):
        """save_pretrained/from_pretrained round-trip preserves tokenizer vocab and image-processor config."""
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = self.get_rust_tokenizer()
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
        lowerCamelCase__ = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """from_pretrained honors extra kwargs (new special tokens, image-processor overrides)."""
        lowerCamelCase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        lowerCamelCase__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        lowerCamelCase__ = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Processor image output matches calling the image processor directly."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        lowerCamelCase__ = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def __lowerCamelCase ( self ):
        """Processor text output matches calling the tokenizer directly (max_length padding)."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = processor(text=__lowerCAmelCase )
        lowerCamelCase__ = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=6_4 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def __lowerCamelCase ( self ):
        """Joint text+image call yields the combined key set; calling with no input raises."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowerCAmelCase ):
            processor()
    def __lowerCamelCase ( self ):
        """batch_decode delegates to the tokenizer's batch_decode."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCamelCase__ = processor.batch_decode(__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Processor output keys match the declared model_input_names."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Any:
    """Write a README.md model card for one allenai wmt16 FSMT checkpoint.

    NOTE(review): locals are collapsed to one obfuscated name, so `texts`,
    `scores`, `pair`, `readme`, `model_card_dir`, `src_lang`, `tgt_lang` and
    `model_name` used below are undefined as written -- TODO confirm the
    original parameter/local bindings.
    """
    # Sample sentence per language, interpolated into the usage example.
    lowerCamelCase__ = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    lowerCamelCase__ = {
        '''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
        '''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
        '''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
    }
    lowerCamelCase__ = F'{src_lang}-{tgt_lang}'
    # Full README body (YAML front matter + usage + eval instructions).
    lowerCamelCase__ = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=__snake_case ,exist_ok=__snake_case )
    lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
    print(F'Generating {path}' )
    with open(__snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
        f.write(__snake_case )
# make sure we are under the root of the project
_a = Path(__file__).resolve().parent.parent.parent
_a = repo_dir / "model_cards"
# NOTE(review): `repo_dir`, `model_cards_dir`, `model_card_dir` and
# `write_model_card` are not defined under these names in this obfuscated
# module (the bindings above collapse to `_a`) -- TODO confirm.
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    _a = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import table for the CANINE model.
# NOTE(review): every binding below collapses to the obfuscated name `_a`,
# and `_import_structure` passed to `_LazyModule` at the bottom is undefined
# as written -- TODO confirm the original names.
_a = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
# PyTorch symbols: only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
# Eager imports for static type checkers; at runtime a _LazyModule is
# installed instead so heavy deps load on first attribute access.
if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    """Deprecated alias for the Segformer image processor, kept for backward compatibility."""

    def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Warn about the deprecation, then defer entirely to the parent initializer."""
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , __lowerCAmelCase , )
        super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 29
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# All paths are set with the intent you should run this script from the root
# of the repo with the command `python utils/check_config_docstrings.py`.
_a = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_a = direct_transformers_import(PATH_TO_TRANSFORMERS)
_a = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_a = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
# Config classes intentionally exempt from the docstring-checkpoint check.
# NOTE(review): all bindings above/below collapse to the obfuscated name `_a`;
# `PATH_TO_TRANSFORMERS`, `transformers`, `_re_checkpoint` etc. referenced in
# this module are undefined as written -- TODO confirm the original names.
_a = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
    """Return the checkpoint name referenced in ``__snake_case``'s source, or None.

    Only Markdown links of the form `[name](https://huggingface.co/name)` whose
    link target matches the name count as valid checkpoint references.
    (Fixed: the original collapsed all locals to one obfuscated name, leaving
    `checkpoints` and `checkpoint` undefined.)
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(__snake_case )
    # NOTE(review): `_re_checkpoint` is a module-level regex bound to the
    # obfuscated name `_a` above -- TODO confirm it resolves at runtime.
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def lowerCAmelCase__() -> Optional[Any]:
    """Raise ValueError listing every non-exempt config class whose docstring
    lacks a valid checkpoint reference.

    (Fixed: the original collapsed all locals to one obfuscated name, leaving
    `configs_without_checkpoint`, `name` and `message` undefined.)
    """
    configs_without_checkpoint = []
    # NOTE(review): `CONFIG_MAPPING`, `get_checkpoint_from_config_class` and
    # `CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK` are bound to
    # obfuscated names elsewhere in this module -- TODO confirm they resolve.
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 29
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> float | int:
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCamelCase__ = cst_fwd.get(__snake_case ,np.inf )
lowerCamelCase__ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
lowerCamelCase__ = new_cost_f
lowerCamelCase__ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase__(source ,destination ,graph_forward ,graph_backward ) -> int:
    """Bidirectional Dijkstra: shortest distance from `source` to `destination`,
    or -1 when no path exists.

    `graph_forward` / `graph_backward` map node -> [[neighbour, weight], ...]
    with the backward graph holding reversed edges.

    (Fixed: the original declared four parameters all named `__snake_case` --
    a SyntaxError -- and collapsed every local to one obfuscated name;
    bindings are restored to match the body's references.)
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        # NOTE(review): `pass_and_relaxation` is the pre-obfuscation name of
        # the helper above -- TODO confirm it resolves at runtime.
        shortest_distance = pass_and_relaxation(
            graph_forward ,v_fwd ,visited_forward ,visited_backward ,cst_fwd ,cst_bwd ,queue_forward ,parent_forward ,shortest_distance ,)
        shortest_distance = pass_and_relaxation(
            graph_backward ,v_bwd ,visited_backward ,visited_forward ,cst_bwd ,cst_fwd ,queue_backward ,parent_backward ,shortest_distance ,)
        # Stop once the two frontiers can no longer improve the best meeting cost.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: node -> [[neighbour, edge_weight], ...]
# NOTE(review): both graph bindings collapse to the obfuscated name `_a` --
# TODO confirm the original names used by the doctests.
_a = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# Backward adjacency list (edges reversed relative to the graph above).
_a = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 29
| 1
|
def lowerCAmelCase__(string_a ,string_b ) -> int:
    """Return the Hamming distance between two equal-length strings.

    Raises:
        ValueError: if the strings differ in length.

    (Fixed: the original declared both parameters as `__snake_case` -- a
    SyntaxError -- bound the counter to an obfuscated name while incrementing
    `count`, and compared a loop variable with itself via `chara != chara`.)
    """
    if len(string_a ) != len(string_b ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for char_a, char_b in zip(string_a ,string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 29
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
    """Processor exposing a CLAP feature extractor and a RoBERTa tokenizer behind one interface."""

    # Names of the component classes the ProcessorMixin base instantiates.
    lowerCAmelCase_ = """ClapFeatureExtractor"""
    lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Store the feature extractor and tokenizer on the mixin base class."""
        super().__init__(__lowerCAmelCase , __lowerCAmelCase )
    def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
        """Tokenize `text` and/or extract audio features; at least one input is required.

        NOTE(review): locals and parameters are collapsed to obfuscated names,
        so `kwargs`, `text`, `audios`, `encoding` and `audio_features` read
        below are undefined as written -- TODO confirm the original bindings.
        """
        lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if audios is not None:
            lowerCamelCase__ = self.feature_extractor(
                __lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        # Merge audio features into the text encoding when both are present.
        if text is not None and audios is not None:
            lowerCamelCase__ = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
    @property
    def __lowerCamelCase ( self ):
        """Union of tokenizer and feature-extractor input names, order-preserving and de-duplicated."""
        lowerCamelCase__ = self.tokenizer.model_input_names
        lowerCamelCase__ = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29
| 1
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_a = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
'''simple docstring'''
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_6 , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=1_4 , __lowerCAmelCase=1_0 , __lowerCAmelCase=1_9 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=True , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=[1, 2, 3, 4, 5] , __lowerCAmelCase=2_5 , __lowerCAmelCase=5 , ):
        """Store the hyper-parameters used to build tiny Autoformer configs and inputs.

        NOTE(review): parameters are collapsed to obfuscated names, so the
        right-hand-side names below (`d_model`, `parent`, `batch_size`, ...)
        are undefined as written -- TODO confirm the original parameter names.
        """
        lowerCamelCase__ = d_model
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = prediction_length
        lowerCamelCase__ = context_length
        lowerCamelCase__ = cardinality
        lowerCamelCase__ = num_time_features
        lowerCamelCase__ = lags_sequence
        lowerCamelCase__ = embedding_dimension
        lowerCamelCase__ = is_training
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = context_length
        # Decoder sees prediction window plus the label (conditioning) window.
        lowerCamelCase__ = prediction_length + label_length
        lowerCamelCase__ = label_length
        lowerCamelCase__ = moving_average
        lowerCamelCase__ = autocorrelation_factor
    def __lowerCamelCase ( self ):
        """Build a tiny AutoformerConfig from the stored hyper-parameters."""
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __lowerCamelCase ( self , __lowerCAmelCase ):
    """Create a dict of random model inputs matching *config*: past/future
    values, time features, an observed mask and one static categorical feature.

    NOTE(review): obfuscated — each ``lowerCamelCase__ =`` stood for a distinct
    local (``past_values``, ``past_time_features``, ...) that the dict below
    references.
    """
    # the model needs the context window plus the largest lag to build lagged features
    lowerCamelCase__ = config.context_length + max(config.lags_sequence )
    lowerCamelCase__ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
    lowerCamelCase__ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
    lowerCamelCase__ = floats_tensor([self.batch_size, _past_length] )
    # random boolean mask: which past values were actually observed
    lowerCamelCase__ = floats_tensor([self.batch_size, _past_length] ) > 0.5
    # decoder inputs
    lowerCamelCase__ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
    lowerCamelCase__ = floats_tensor([self.batch_size, config.prediction_length] )
    lowerCamelCase__ = {
        '''past_values''': past_values,
        '''static_categorical_features''': static_categorical_features,
        '''past_time_features''': past_time_features,
        '''past_observed_mask''': past_observed_mask,
        '''future_time_features''': future_time_features,
        '''future_values''': future_values,
    }
    return inputs_dict
def __lowerCamelCase ( self ):
    """Return (config, inputs_dict) for a fresh Autoformer test run."""
    lowerCamelCase__ = self.get_config()
    lowerCamelCase__ = self.prepare_autoformer_inputs_dict(__lowerCAmelCase )
    return config, inputs_dict
def __lowerCamelCase ( self ):
    """Common-test-suite entry point; simply delegates to prepare_config_and_inputs."""
    lowerCamelCase__ , lowerCamelCase__ = self.prepare_config_and_inputs()
    return config, inputs_dict
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
    """Round-trip the encoder and decoder through save_pretrained /
    from_pretrained as standalone modules and assert their outputs match the
    full model's within atol 1e-3.

    NOTE(review): obfuscated — upstream this method takes (config, inputs_dict)
    and the repeated ``__lowerCAmelCase`` arguments stand for distinct values.
    """
    lowerCamelCase__ = AutoformerModel(config=__lowerCAmelCase ).to(__lowerCAmelCase ).eval()
    lowerCamelCase__ = model(**__lowerCAmelCase )
    lowerCamelCase__ = outputs.encoder_last_hidden_state
    lowerCamelCase__ = outputs.last_hidden_state
    with tempfile.TemporaryDirectory() as tmpdirname:
        lowerCamelCase__ = model.get_encoder()
        encoder.save_pretrained(__lowerCAmelCase )
        lowerCamelCase__ = AutoformerEncoder.from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model.create_network_inputs(**__lowerCAmelCase )
    lowerCamelCase__ , lowerCamelCase__ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
    # encoder input = raw context window concatenated with extracted features
    lowerCamelCase__ = torch.cat(
        (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
    lowerCamelCase__ = encoder(inputs_embeds=__lowerCAmelCase )[0]
    self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
    # decoder seeds: context mean (trend init) repeated over the prediction horizon
    lowerCamelCase__ = (
        torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
        .unsqueeze(1 )
        .repeat(1 , config.prediction_length , 1 )
    )
    # and zeros as the seasonal initialization
    lowerCamelCase__ = torch.zeros(
        [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
    lowerCamelCase__ = torch.cat(
        (
            torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
            feature[:, config.context_length - config.label_length :, ...],
        ) , dim=-1 , )
    lowerCamelCase__ = torch.cat(
        (
            torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
            feature[:, config.context_length - config.label_length :, ...],
        ) , dim=-1 , )
    with tempfile.TemporaryDirectory() as tmpdirname:
        lowerCamelCase__ = model.get_decoder()
        decoder.save_pretrained(__lowerCAmelCase )
        lowerCamelCase__ = AutoformerDecoder.from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
    lowerCamelCase__ = decoder(
        trend=__lowerCAmelCase , inputs_embeds=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )[0]
    self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Common-suite model tests for Autoformer.

    NOTE(review): identifiers are machine-obfuscated (``__A``,
    ``lowerCAmelCase_``). The class attributes below correspond to the usual
    ``all_model_classes`` / ``all_generative_model_classes`` /
    ``pipeline_model_mapping`` plus the boolean feature flags of the common
    test mixin — verify the exact mapping against the upstream test file.
    """

    lowerCAmelCase_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    lowerCAmelCase_ = (AutoformerForPrediction,) if is_torch_available() else ()
    lowerCAmelCase_ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def __lowerCamelCase ( self ):
        """Set up the model tester and a ConfigTester (no text modality)."""
        lowerCamelCase__ = AutoformerModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        """save_pretrained / from_pretrained round-trip must report no missing keys."""
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(__lowerCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__lowerCAmelCase )
                lowerCamelCase__ , lowerCamelCase__ = model_class.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
            self.assertEqual(info['''missing_keys'''] , [] )

    def __lowerCamelCase ( self ):
        """Encoder/decoder work standalone (delegates to the model tester)."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*__lowerCAmelCase )

    @unittest.skip(reason='''Model has no tokens embeddings''' )
    def __lowerCamelCase ( self ):
        """Skipped: the resize-token-embeddings test does not apply."""
        pass

    def __lowerCamelCase ( self ):
        """``main_input_name`` must match the first forward() argument after self."""
        lowerCamelCase__ = inspect.signature(getattr(__lowerCAmelCase , '''forward''' ) )
        # The main input is the name of the argument after `self`
        lowerCamelCase__ = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , __lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """forward() must expose the expected argument names in the expected order."""
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(__lowerCAmelCase )
            lowerCamelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ = [*signature.parameters.keys()]
            lowerCamelCase__ = [
                '''past_values''',
                '''past_time_features''',
                '''past_observed_mask''',
                '''static_categorical_features''',
                '''static_real_features''',
                '''future_values''',
                '''future_time_features''',
            ]
            # the prediction head additionally accepts the future observed mask
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('''future_observed_mask''' )
            expected_arg_names.extend(
                [
                    '''decoder_attention_mask''',
                    '''head_mask''',
                    '''decoder_head_mask''',
                    '''cross_attn_head_mask''',
                    '''encoder_outputs''',
                    '''past_key_values''',
                    '''output_hidden_states''',
                    '''output_attentions''',
                    '''use_cache''',
                    '''return_dict''',
                ] )
            self.assertListEqual(arg_names[: len(__lowerCAmelCase )] , __lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Encoder/decoder/cross attention outputs have the right count and shape,
        both when requested per-call and via the config flag."""
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = True
        lowerCamelCase__ = getattr(self.model_tester , '''seq_length''' , __lowerCAmelCase )
        lowerCamelCase__ = getattr(self.model_tester , '''decoder_seq_length''' , __lowerCAmelCase )
        lowerCamelCase__ = getattr(self.model_tester , '''encoder_seq_length''' , __lowerCAmelCase )
        lowerCamelCase__ = getattr(self.model_tester , '''d_model''' , __lowerCAmelCase )
        lowerCamelCase__ = getattr(self.model_tester , '''num_attention_heads''' , __lowerCAmelCase )
        # per-head dimension used in the expected attention shapes below
        lowerCamelCase__ = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            lowerCamelCase__ = True
            lowerCamelCase__ = False
            lowerCamelCase__ = True
            lowerCamelCase__ = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            lowerCamelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ = True
            lowerCamelCase__ = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            lowerCamelCase__ = outputs.encoder_attentions
            self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            lowerCamelCase__ = len(__lowerCAmelCase )
            # 7 core outputs; optional members below each add one to the count
            lowerCamelCase__ = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1 # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
            # decoder attentions
            lowerCamelCase__ = outputs.decoder_attentions
            self.assertIsInstance(__lowerCAmelCase , (list, tuple) )
            self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            lowerCamelCase__ = outputs.cross_attentions
            self.assertIsInstance(__lowerCAmelCase , (list, tuple) )
            self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
        # Check attention is always last and order is fine
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        lowerCamelCase__ = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        with torch.no_grad():
            lowerCamelCase__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
        self.assertEqual(out_len + 2 , len(__lowerCAmelCase ) )
        lowerCamelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(__lowerCAmelCase ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def __lowerCamelCase ( self ):
        """Marked flaky upstream: retain_grad on hidden states / attentions."""
        super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase__(__snake_case="train-batch.pt" ) -> Optional[int]:
    """Download a pickled test batch from the HF Hub dataset
    ``hf-internal-testing/tourism-monthly-batch`` and ``torch.load`` it.

    NOTE(review): passing the same value as both the file to load and
    ``map_location`` looks like obfuscation damage — upstream loads the
    downloaded file with ``map_location=device``. Also note ``torch.load`` on
    downloaded pickles executes arbitrary code; acceptable only for trusted
    internal test fixtures.
    """
    lowerCamelCase__ = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' ,filename=__snake_case ,repo_type='''dataset''' )
    lowerCamelCase__ = torch.load(__snake_case ,map_location=__snake_case )
    return batch
@require_torch
@slow
class __A ( unittest.TestCase ):
    """Slow integration tests against the ``huggingface/autoformer-tourism-monthly``
    checkpoint, pinning output shapes and small value slices.

    NOTE(review): ``atol=__lowerCAmelCase`` in the allclose calls is obfuscation
    damage — upstream passes a small float tolerance constant.
    """

    def __lowerCamelCase ( self ):
        """Full forward pass (with future values): check output shape and a 3x3 slice."""
        lowerCamelCase__ = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__lowerCAmelCase )
        lowerCamelCase__ = prepare_batch()
        with torch.no_grad():
            lowerCamelCase__ = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
        # decoder output covers prediction + label (overlap) length
        lowerCamelCase__ = torch.Size(
            (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , __lowerCAmelCase )
        lowerCamelCase__ = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__lowerCAmelCase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def __lowerCamelCase ( self ):
        """Encoder-only forward pass: check encoder_last_hidden_state shape and slice."""
        lowerCamelCase__ = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__lowerCAmelCase )
        lowerCamelCase__ = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            lowerCamelCase__ = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
        lowerCamelCase__ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , __lowerCAmelCase )
        lowerCamelCase__ = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__lowerCAmelCase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def __lowerCamelCase ( self ):
        """generate(): check sampled sequence shape and the mean prediction tail."""
        lowerCamelCase__ = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__lowerCAmelCase )
        lowerCamelCase__ = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            lowerCamelCase__ = model.generate(
                static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
        lowerCamelCase__ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , __lowerCAmelCase )
        lowerCamelCase__ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__lowerCAmelCase )
        # average over the parallel samples before comparing (loose 10% tolerance)
        lowerCamelCase__ = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __lowerCAmelCase , rtol=1E-1 ) )
| 29
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
    """Builds a tiny BERT-backed DPR config plus random inputs for the TF DPR tests.

    NOTE(review): identifiers are machine-obfuscated; each ``lowerCamelCase__ =``
    presumably stood for a distinct ``self.<attr> =`` (or local) assignment
    upstream — verify against the original ``TFDPRModelTester``.
    """

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ):
        """Record the (small) BERT/DPR hyper-parameters used by the tests."""
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = seq_length
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_input_mask
        lowerCamelCase__ = use_token_type_ids
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = num_labels
        lowerCamelCase__ = num_choices
        lowerCamelCase__ = scope
        # projection_dim == 0 means the encoders output the raw hidden size
        lowerCamelCase__ = projection_dim

    def __lowerCamelCase ( self ):
        """Create random ids/masks/labels and a DPRConfig wrapping a small BertConfig."""
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ = None
        if self.use_token_type_ids:
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
        lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Context encoder forward passes (with/without masks); pooled output
        must be (batch, projection_dim or hidden_size)."""
        lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Question encoder forward passes; same pooled-output shape contract."""
        lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Reader forward pass; start/end logits are (batch, seq) and
        relevance logits are (batch,)."""
        lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )

    def __lowerCamelCase ( self ):
        """Unpack prepare_config_and_inputs; the common tests only feed input_ids."""
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = config_and_inputs
        lowerCamelCase__ = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Common-suite tests for the TF DPR models (context/question encoders, reader).

    NOTE(review): ``lowerCAmelCase_`` attributes are obfuscated common-mixin
    settings (``all_model_classes``, ``pipeline_model_mapping``, flags).
    """

    lowerCAmelCase_ = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def __lowerCamelCase ( self ):
        """Set up the DPR model tester and a ConfigTester with hidden_size=37."""
        lowerCamelCase__ = TFDPRModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def __lowerCamelCase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        """Exercise the context encoder via the tester."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Exercise the question encoder via the tester."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Exercise the reader via the tester."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase )

    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test from_pretrained for the first checkpoint of each archive list.

        NOTE(review): the context-encoder list is iterated twice below —
        apparently duplicated; the second loop was presumably meant for a
        different archive list. Verify against upstream.
        """
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class __A ( unittest.TestCase ):
    """Slow integration test pinning the question-encoder embedding values for
    ``facebook/dpr-question_encoder-single-nq-base``."""

    @slow
    def __lowerCamelCase ( self ):
        """Encode one tokenized sentence and compare the first 10 embedding
        components against a pinned slice (atol 1e-4)."""
        lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        lowerCamelCase__ = tf.constant(
            [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
        lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    0.0323_6253,
                    0.1275_3335,
                    0.1681_8509,
                    0.0027_9786,
                    0.389_6933,
                    0.2426_4945,
                    0.217_8971,
                    -0.0233_5227,
                    -0.0848_1959,
                    -0.1432_4117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( lowerCAmelCase , unittest.TestCase ):
    """Tokenizer tests for DeBERTa (slow and fast), built on TokenizerTesterMixin.

    NOTE(review): ``lowerCAmelCase_`` attributes are the obfuscated mixin
    settings (``tokenizer_class``, ``test_rust_tokenizer``,
    ``rust_tokenizer_class``).
    """

    lowerCAmelCase_ = DebertaTokenizer
    lowerCAmelCase_ = True
    lowerCAmelCase_ = DebertaTokenizerFast

    def __lowerCamelCase ( self ):
        """Write a tiny BPE vocab/merges pair into the test tmpdir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase__ = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''[UNK]''',
        ]
        lowerCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
        lowerCamelCase__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowerCamelCase__ = {'''unk_token''': '''[UNK]'''}
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowerCAmelCase ) )

    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        """Instantiate the slow tokenizer from the tmpdir (adding special tokens)."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Return (input_text, output_text) for the common round-trip test."""
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = '''lower newer'''
        return input_text, output_text

    def __lowerCamelCase ( self ):
        """BPE-tokenize 'lower newer' and check tokens and token-to-id mapping."""
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        lowerCamelCase__ = tokenizer.tokenize(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = tokens + [tokenizer.unk_token]
        lowerCamelCase__ = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """token_type_ids must distinguish the two segments of a text pair."""
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = tokenizer('''Hello''' , '''World''' )
        lowerCamelCase__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , __lowerCAmelCase )

    @slow
    def __lowerCamelCase ( self ):
        """build_inputs_with_special_tokens must agree with encode() for single
        and pair inputs on the microsoft/deberta-base tokenizer."""
        lowerCamelCase__ = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        lowerCamelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def __lowerCamelCase ( self ):
        """Pin full encodings (ids/type-ids/attention-mask) and decoded round-trip
        for three ALBERT-paper sentences, for both slow and fast tokenizers."""
        lowerCamelCase__ = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            lowerCamelCase__ = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            lowerCamelCase__ = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            lowerCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
            lowerCamelCase__ = [tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) for seq in encoding['''input_ids''']]
            # fmt: off
            lowerCamelCase__ = {
                '''input_ids''': [
                    [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
                ],
                '''token_type_ids''': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                '''attention_mask''': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            lowerCamelCase__ = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            self.assertDictEqual(encoding.data , __lowerCAmelCase )
            for expected, decoded in zip(__lowerCAmelCase , __lowerCAmelCase ):
                self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 29
|
import string
from math import logaa
def lowerCAmelCase__(__snake_case, __snake_case_document) -> int:
    """Return the raw term frequency: how often the term occurs in the document.

    Fixes the obfuscation-broken original, which declared ``__snake_case``
    twice (a SyntaxError) and referenced the undefined names ``document`` and
    ``term`` in the body.

    Args:
        __snake_case: the term to count (matched case-insensitively).
        __snake_case_document: the document text; punctuation and newlines are
            stripped before tokenizing on single spaces.

    Returns:
        Number of whitespace-delimited tokens equal to the term (case-insensitive).
    """
    term = __snake_case
    document = __snake_case_document
    # strip all punctuation and newlines before tokenizing
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def lowerCAmelCase__(__snake_case, __snake_case_corpus) -> tuple[int, int]:
    """Return (number of documents containing the term, total number of documents).

    Fixes the obfuscation-broken original (duplicate ``__snake_case``
    parameters; undefined ``corpus``/``term`` in the body).

    Args:
        __snake_case: the term to look for (case-insensitive substring match).
        __snake_case_corpus: the corpus, one document per ``'\\n'``-separated line;
            it is lower-cased and punctuation-stripped before matching.
    """
    term = __snake_case
    corpus = __snake_case_corpus
    # strip all punctuation and replace it with ''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def lowerCAmelCase__(__snake_case, __snake_case_n, __snake_case_smoothing=False) -> float:
    """Return the inverse document frequency, rounded to 3 decimals.

    Fixes the obfuscation-broken original (duplicate ``__snake_case``
    parameters; undefined ``df``/``n``/``smoothing`` in the body; the module's
    ``from math import logaa`` alias does not exist in the stdlib).

    Args:
        __snake_case: document frequency ``df`` (documents containing the term).
        __snake_case_n: total number of documents ``n``.
        __snake_case_smoothing: if True, use the smoothed form
            ``1 + log10(n / (1 + df))`` so ``df == 0`` is allowed.

    Raises:
        ZeroDivisionError: if ``df == 0`` without smoothing.
        ValueError: if ``n == 0`` (log10(0) is undefined).
    """
    # local import: the file-level `from math import logaa` alias is broken
    from math import log10

    df = __snake_case
    n = __snake_case_n
    if __snake_case_smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    if n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def lowerCAmelCase__(__snake_case, __snake_case_idf) -> float:
    """Return ``tf * idf`` rounded to 3 decimal places.

    Fixes the obfuscation-broken original (duplicate ``__snake_case``
    parameters; undefined ``tf``/``idf`` in the body).
    """
    return round(__snake_case * __snake_case_idf, 3)
| 29
| 1
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __A ( lowerCAmelCase ):
    """Descriptor that caches the wrapped getter's result on the instance.

    NOTE(review): obfuscated port of transformers' ``cached_property``; the
    base-class reference ``lowerCAmelCase`` presumably stood for ``property``,
    and the body's ``obj``/``cached`` names no longer match the obfuscated
    parameters — verify against the upstream implementation.
    """

    def __get__( self , __lowerCAmelCase , __lowerCAmelCase=None ):
        """Compute ``self.fget(obj)`` once and memoize it under ``__cached_<name>``."""
        # class-level access returns the descriptor itself
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''' )
        lowerCamelCase__ = '''__cached_''' + self.fget.__name__
        # NOTE(review): three identical getattr arguments look like obfuscation
        # damage; upstream this is getattr(obj, attr, None).
        lowerCamelCase__ = getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        if cached is None:
            lowerCamelCase__ = self.fget(__lowerCAmelCase )
            setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        return cached
def lowerCAmelCase__(__snake_case) -> int:
    """Convert a truth-value string to 1 or 0 (mirrors the classic
    ``distutils.util.strtobool`` semantics).

    Fixes the obfuscation-broken original: the lowered value was bound to a
    throwaway name and the membership tests referenced the undefined ``val``;
    the ``Union`` return annotation referenced a name this module never imports.

    Args:
        __snake_case: a string such as "yes"/"no", "true"/"false", "on"/"off",
            "1"/"0" (case-insensitive).

    Raises:
        ValueError: for any unrecognized value.
    """
    val = __snake_case.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F'invalid truth value {__snake_case!r}')
def lowerCAmelCase__(__snake_case ) -> bool:
    """Return True if the argument is a tensor-like object: a torch FX proxy,
    a ``torch.Tensor``, a ``tf.Tensor``, a JAX array/tracer, or a numpy array.

    Each framework is imported lazily and only when its availability check
    passes, so this is safe in environments with a subset of the frameworks.
    """
    if is_torch_fx_proxy(__snake_case ):
        return True
    if is_torch_available():
        import torch
        if isinstance(__snake_case ,torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(__snake_case ,tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(__snake_case ,(jnp.ndarray, Tracer) ):
            return True
    # numpy is always available here, so it is the unconditional fallback
    return isinstance(__snake_case ,np.ndarray )
def lowerCAmelCase__(__snake_case ) -> Any:
    """True when the object is a ``numpy.ndarray``."""
    result = isinstance(__snake_case , np.ndarray )
    return result
def lowerCAmelCase__(__snake_case ) -> Dict:
    """Public wrapper: tests whether the object is a numpy array."""
    is_np = _is_numpy(__snake_case )
    return is_np
def lowerCAmelCase__(__snake_case ) -> int:
    """True when the object is a ``torch.Tensor`` (torch must be importable;
    callers gate on ``is_torch_available()``)."""
    import torch

    is_torch_tensor_instance = isinstance(__snake_case , torch.Tensor )
    return is_torch_tensor_instance
def lowerCAmelCase__(__snake_case ) -> Optional[int]:
    """True when torch is installed and the object is a ``torch.Tensor``."""
    if not is_torch_available():
        return False
    return _is_torch(__snake_case )
def lowerCAmelCase__(__snake_case ) -> str:
    """True when the object is a ``torch.device`` (torch must be importable)."""
    import torch

    is_device = isinstance(__snake_case , torch.device )
    return is_device
def lowerCAmelCase__(__snake_case ) -> Optional[int]:
    """True when torch is installed and the object is a ``torch.device``."""
    if not is_torch_available():
        return False
    return _is_torch_device(__snake_case )
def lowerCAmelCase__(__snake_case ) -> bool:
    """True when the object is a ``torch.dtype`` — either the dtype itself or
    its string name (e.g. ``"float32"``).

    NOTE(review): the original called ``isinstance(x, x)`` / ``hasattr(x, x)``
    (obfuscation damage); restored the string-name resolution against the
    ``torch`` module.
    """
    import torch

    value = __snake_case
    if isinstance(value , str ):
        # Resolve a dtype name like "float32" to torch.float32 if it exists.
        if hasattr(torch , value ):
            value = getattr(torch , value )
        else:
            return False
    return isinstance(value , torch.dtype )
def lowerCAmelCase__(__snake_case ) -> Any:
    """True when torch is installed and the object is a ``torch.dtype``."""
    if not is_torch_available():
        return False
    return _is_torch_dtype(__snake_case )
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
    """True when the object is a ``tf.Tensor`` (TensorFlow must be importable)."""
    import tensorflow as tf

    is_tf_instance = isinstance(__snake_case , tf.Tensor )
    return is_tf_instance
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
    """True when TensorFlow is installed and the object is a ``tf.Tensor``."""
    if not is_tf_available():
        return False
    return _is_tensorflow(__snake_case )
def lowerCAmelCase__(__snake_case ) -> bool:
    """True when the object is a *symbolic* TensorFlow tensor.

    Uses ``tf.is_symbolic_tensor`` when available (TF >= 2.14); otherwise
    falls back to an exact type check against ``tf.Tensor``.
    """
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    # NOTE(review): the original probed hasattr on the *argument* instead of
    # the tf module, so the TF>=2.14 path could never trigger; fixed.
    if hasattr(tf ,'''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(__snake_case )
    return type(__snake_case ) == tf.Tensor
def lowerCAmelCase__(__snake_case ) -> str:
    """True when TensorFlow is installed and the object is a symbolic tensor."""
    if not is_tf_available():
        return False
    return _is_tf_symbolic_tensor(__snake_case )
def lowerCAmelCase__(__snake_case ) -> List[str]:
    """True when the object is a ``jax.numpy`` ndarray (jax must be importable)."""
    import jax.numpy as jnp  # noqa: F811

    is_jnp = isinstance(__snake_case , jnp.ndarray )
    return is_jnp
def lowerCAmelCase__(__snake_case ) -> str:
    """True when flax/jax is installed and the object is a jax ndarray."""
    if not is_flax_available():
        return False
    return _is_jax(__snake_case )
def lowerCAmelCase__(obj ):
    """Recursively convert tensors/arrays (possibly nested in dict/list/tuple)
    into plain Python lists/scalars.

    NOTE(review): the original recursed on the whole container instead of each
    element (``to_py_obj(__snake_case)`` inside the comprehensions); fixed to
    recurse on ``v`` / ``o``. The recursive call keeps the module's original
    ``to_py_obj`` name, matching how sibling helpers are referenced here.
    """
    if isinstance(obj ,(dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj ,(list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj ,(np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def lowerCAmelCase__(obj ):
    """Recursively convert tensors (possibly nested in dict/list/tuple) into
    numpy arrays.

    NOTE(review): the original recursed on the whole container instead of the
    dict values; fixed to recurse on ``v``. The recursive call keeps the
    module's original ``to_numpy`` name, matching how siblings are referenced.
    """
    if isinstance(obj ,(dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj ,(list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class __A ( lowerCAmelCase ):
    """Ordered-dict-like container for model outputs (``ModelOutput`` style).

    Keys are the dataclass fields that are not None; values are accessible
    both as attributes and as mapping items, and the mutating dict methods
    (``pop``/``update``/``setdefault``/``__delitem__``) are forbidden.

    NOTE(review): identifiers in this class look machine-mangled — assignment
    targets are all ``lowerCamelCase__`` while later lines read names such as
    ``class_fields``/``first_field``/``inner_dict``; code left byte-identical.
    """
    def __lowerCamelCase ( self ):
        """Post-init hook (presumably ``__post_init__``): validate the fields
        and register every non-None one as a mapping entry."""
        lowerCamelCase__ = fields(self )
        # Safety and consistency checks
        if not len(__lowerCAmelCase ):
            raise ValueError(F'{self.__class__.__name__} has no fields.' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' )
        lowerCamelCase__ = getattr(self , class_fields[0].name )
        lowerCamelCase__ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        # When only the first field is set and it is not a tensor, it may be a
        # mapping or an iterable of (key, value) pairs to splat into self.
        if other_fields_are_none and not is_tensor(__lowerCAmelCase ):
            if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                lowerCamelCase__ = first_field.items()
                lowerCamelCase__ = True
            else:
                try:
                    lowerCamelCase__ = iter(__lowerCAmelCase )
                    lowerCamelCase__ = True
                except TypeError:
                    lowerCamelCase__ = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(__lowerCAmelCase ):
                    if (
                        not isinstance(__lowerCAmelCase , (list, tuple) )
                        or not len(__lowerCAmelCase ) == 2
                        or not isinstance(element[0] , __lowerCAmelCase )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            lowerCamelCase__ = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        lowerCamelCase__ = element[1]
            elif first_field is not None:
                lowerCamelCase__ = first_field
        else:
            # Regular dataclass path: register every non-None field value.
            for field in class_fields:
                lowerCamelCase__ = getattr(self , field.name )
                if v is not None:
                    lowerCamelCase__ = v
    def __delitem__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Deletion is not allowed on model outputs."""
        raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """``setdefault`` is not allowed on model outputs."""
        raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """``pop`` is not allowed on model outputs."""
        raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """``update`` is not allowed on model outputs."""
        raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
    def __getitem__( self , __lowerCAmelCase ):
        """String keys behave like dict lookup; int/slice keys index ``to_tuple()``."""
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            lowerCamelCase__ = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Setting an attribute that is a known key mirrors it into the mapping."""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(__lowerCAmelCase , __lowerCAmelCase )
        super().__setattr__(__lowerCAmelCase , __lowerCAmelCase )
    def __setitem__( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Item assignment also mirrors the value onto the attribute."""
        super().__setitem__(__lowerCAmelCase , __lowerCAmelCase )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(__lowerCAmelCase , __lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Return the values, in key order, as a tuple (``to_tuple``)."""
        return tuple(self[k] for k in self.keys() )
class __A ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
@classmethod
def __lowerCamelCase ( cls , __lowerCAmelCase ):
'''simple docstring'''
raise ValueError(
F'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class __A ( lowerCAmelCase ):
    """Valid values for a tokenizer ``padding`` argument: longest /
    max_length / do_not_pad. Base class is an obfuscated name — presumably
    the explicit-enum class defined above (TODO confirm)."""
    # NOTE(review): all three members are assigned to the same obfuscated
    # name ``lowerCAmelCase_``, so only the last assignment survives.
    lowerCAmelCase_ = """longest"""
    lowerCAmelCase_ = """max_length"""
    lowerCAmelCase_ = """do_not_pad"""
class __A ( lowerCAmelCase ):
    """Tensor framework selector: pt / tf / np / jax. Base class is an
    obfuscated name — presumably the explicit-enum class above (TODO confirm)."""
    # NOTE(review): members all share one obfuscated name; only the last
    # assignment survives as an attribute.
    lowerCAmelCase_ = """pt"""
    lowerCAmelCase_ = """tf"""
    lowerCAmelCase_ = """np"""
    lowerCAmelCase_ = """jax"""
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = context_managers
lowerCamelCase__ = ExitStack()
def __enter__( self ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(__lowerCAmelCase )
def __exit__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
self.stack.__exit__(*__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase__(model_class ) -> bool:
    """Whether ``model_class``'s forward/call signature declares a
    ``return_loss`` parameter defaulting to True.

    NOTE(review): the original never bound ``framework``/``signature`` and
    read an undefined ``model_class``; restored the bindings the body uses.
    """
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def lowerCAmelCase__(model_class ) -> list:
    """Return the label-argument names accepted by ``model_class``'s
    forward/call signature (including start/end positions for QA models).

    NOTE(review): restored the ``model_name``/``framework``/``signature``
    bindings the body reads; the original discarded them into one name.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def lowerCAmelCase__(d ,parent_key = "" ,delimiter = "." ) -> dict:
    """Flatten a nested mapping into a single level, joining key paths with
    ``delimiter`` (e.g. ``{"a": {"b": 1}} -> {"a.b": 1}``).

    NOTE(review): the original declared three parameters with the same name
    (a SyntaxError); restored distinct names. The recursion now goes through
    the inner generator directly, which is equivalent to rebuilding a dict at
    every level.
    """
    def _flatten_dict(d ,parent_key="" ,delimiter="." ):
        # Yield (flattened_key, value) pairs depth-first.
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v ,MutableMapping ):
                yield from _flatten_dict(v ,key ,delimiter )
            else:
                yield key, v
    return dict(_flatten_dict(d ,parent_key ,delimiter ) )
@contextmanager
def lowerCAmelCase__(working_dir ,use_temp_dir = False ):
    """Yield ``working_dir`` as-is, or a fresh temporary directory (cleaned up
    on exit) when ``use_temp_dir`` is True.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError); restored the names the body reads.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def lowerCAmelCase__(array ,axes=None ):
    """Framework-agnostic transpose for numpy/torch/TensorFlow/JAX arrays.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError); restored ``array``/``axes`` as the body reads them.
    """
    if is_numpy_array(array ):
        return np.transpose(array ,axes=axes )
    elif is_torch_tensor(array ):
        # torch uses permute for an explicit axis order.
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array ,perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array ,axes=axes )
    else:
        raise ValueError(F'Type not supported for transpose: {type(array )}.' )
def lowerCAmelCase__(array ,newshape ):
    """Framework-agnostic reshape for numpy/torch/TensorFlow/JAX arrays.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError); restored ``array``/``newshape``.
    """
    if is_numpy_array(array ):
        return np.reshape(array ,newshape )
    elif is_torch_tensor(array ):
        # torch's reshape takes the dimensions unpacked.
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array ,newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array ,newshape )
    else:
        raise ValueError(F'Type not supported for reshape: {type(array )}.' )
def lowerCAmelCase__(array ,axis=None ):
    """Framework-agnostic squeeze for numpy/torch/TensorFlow/JAX arrays.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError); restored ``array``/``axis``.
    """
    if is_numpy_array(array ):
        return np.squeeze(array ,axis=axis )
    elif is_torch_tensor(array ):
        # torch calls the keyword ``dim`` and rejects dim=None.
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array ,axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array ,axis=axis )
    else:
        raise ValueError(F'Type not supported for squeeze: {type(array )}.' )
def lowerCAmelCase__(array ,axis ):
    """Framework-agnostic expand_dims for numpy/torch/TensorFlow/JAX arrays.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError); restored ``array``/``axis``.
    """
    if is_numpy_array(array ):
        return np.expand_dims(array ,axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array ,axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array ,axis=axis )
    else:
        raise ValueError(F'Type not supported for expand_dims: {type(array )}.' )
def lowerCAmelCase__(array ):
    """Total number of elements in a numpy/torch/TensorFlow/JAX array.

    NOTE(review): the body read ``array`` while the parameter was an
    obfuscated name; restored it. Also fixed the copy-pasted error message
    that said "expand_dims".
    """
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F'Type not supported for tensor_size: {type(array )}.' )
def lowerCAmelCase__(repo_id ,auto_map ):
    """Prefix every entry of ``auto_map`` with ``repo_id`` as ``repo--value``,
    leaving None values and already-qualified (``--``-containing) ones alone.
    Mutates and returns ``auto_map``.

    NOTE(review): the original declared duplicate parameter names (a
    SyntaxError) and discarded the rewritten values instead of storing them
    back into the map; both fixed.
    """
    for key, value in auto_map.items():
        if isinstance(value ,(tuple, list) ):
            auto_map[key] = [F'{repo_id}--{v}' if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F'{repo_id}--{value}'
    return auto_map
def lowerCAmelCase__(model_class ) -> str:
    """Infer the framework ("tf", "pt" or "flax") from a model class's MRO.

    NOTE(review): the original never bound ``module``/``name`` and raised
    TypeError on the *first* base class that matched nothing (instead of only
    after exhausting the whole MRO); both fixed.
    """
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    # No base class gave the framework away.
    raise TypeError(F'Could not infer framework from class {model_class}.' )
| 29
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the Funnel model package: build an import
# structure keyed by submodule, extend it per optional backend, and either
# import eagerly (type checking) or install a lazy module at runtime.
# NOTE(review): every assignment target below is the obfuscated name ``_a``,
# so each assignment clobbers the previous one, and the final _LazyModule
# call reads an undefined ``_import_structure`` — presumably these were
# ``_import_structure`` dict updates originally (TODO confirm upstream).
_a = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
# Fast tokenizer is only registered when the `tokenizers` backend is present.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a = ["FunnelTokenizerFast"]
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
# Under TYPE_CHECKING we import eagerly so static analyzers see the names;
# at runtime the `else` branch installs a lazy module instead.
if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 1
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_a = "src/diffusers"
# Matches is_xxx_available()
_a = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_a = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
_a = "\n{0} = None\n"
_a = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
_a = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def lowerCAmelCase__(line ):
    """Return the backend(s) mentioned as ``is_xxx_available()`` on the line,
    joined with ``_and_``, or None when there is none.

    NOTE(review): the original measured/joined the *input line* instead of the
    regex matches; fixed to use the match list.
    """
    matches = _re_backend.findall(line )
    if len(matches ) == 0:
        return None
    return "_and_".join(matches )
def lowerCAmelCase__() -> List[Any]:
    """Parse the package ``__init__.py`` and return a mapping
    backend -> list of objects guarded behind that backend.

    NOTE(review): identifiers look machine-mangled — assignment targets are
    throwaway names while later lines read ``lines``/``line_index``/
    ``backend``/``objects``/``backend_specific_objects``; also
    ``__snake_case`` is read but never defined (presumably the package path
    constant, e.g. ``PATH_TO_DIFFUSERS`` — TODO confirm). Left byte-identical.
    """
    with open(os.path.join(__snake_case ,'''__init__.py''' ) ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        lowerCamelCase__ = f.readlines()
    # Get to the point we do the actual imports for type checking
    lowerCamelCase__ = 0
    lowerCamelCase__ = {}
    # Go through the end of the file
    while line_index < len(__snake_case ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        lowerCamelCase__ = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            lowerCamelCase__ = []
            # Until we unindent, add backend objects to the list
            while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
                lowerCamelCase__ = lines[line_index]
                lowerCamelCase__ = _re_single_line_import.search(__snake_case )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(__snake_case ) > 0:
                lowerCamelCase__ = objects
            else:
                line_index += 1
    return backend_specific_objects
def lowerCAmelCase__(name ,backend ):
    """Render the dummy-object source for ``name`` guarded by ``backend``:
    uppercase names become constants, lowercase names functions, everything
    else classes.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError); restored ``name``/``backend``.
    """
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name ,backend )
    else:
        return DUMMY_CLASS.format(name ,backend )
def lowerCAmelCase__(backend_specific_objects=None ) -> dict:
    """Build ``{backend: dummy-module-source}`` for every backend found in the
    package init (or in the provided mapping).

    NOTE(review): the original ignored its parameter, never accumulated
    ``dummy_file`` per backend and never stored the result into the returned
    dict; restored the accumulation per the structure of the loop.
    """
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(F'"{b}"' for b in backend.split('''_and_''' ) ) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o ,backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def lowerCAmelCase__(__snake_case=False ) -> str:
    """Compare the expected dummy files against the ones on disk; rewrite them
    when the flag is True, otherwise raise on any mismatch.

    NOTE(review): identifiers look machine-mangled — assignment targets are
    throwaway names while later lines read ``dummy_files``/``short_names``/
    ``dummy_file_paths``/``actual_dummies``/``overwrite``; the parameter is
    presumably ``overwrite``. Left byte-identical.
    """
    lowerCamelCase__ = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    lowerCamelCase__ = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    lowerCamelCase__ = os.path.join(__snake_case ,'''utils''' )
    lowerCamelCase__ = {
        backend: os.path.join(__snake_case ,F'dummy_{short_names.get(__snake_case ,__snake_case )}_objects.py' )
        for backend in dummy_files.keys()
    }
    lowerCamelCase__ = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(__snake_case ):
            with open(__snake_case ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
                lowerCamelCase__ = f.read()
        else:
            lowerCamelCase__ = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'Updating diffusers.utils.dummy_{short_names.get(__snake_case ,__snake_case )}_objects.py as the main '
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    F'diffusers.utils.dummy_{short_names.get(__snake_case ,__snake_case )}_objects.py. Run `make fix-copies` '
                    '''to fix this.''' )
# CLI entry point: parse --fix_and_overwrite and run the dummy-file check.
# NOTE(review): both assignment targets are the obfuscated name ``_a``, while
# the following lines read ``parser``/``args``; also ``check_dummies`` refers
# to the checker defined above under an obfuscated name. Left byte-identical.
if __name__ == "__main__":
    _a = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    _a = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
| 29
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# Lightweight stand-in for the argparse namespace consumed by
# ``datasets.commands.test.TestCommand``; every flag defaults to off.
# NOTE(review): assigned to the obfuscated name ``_a`` while the test below
# reads ``_TestCommandArgs`` — presumably the original binding name.
_a = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def lowerCAmelCase__(source ,target ) -> bool:
    """True when ``source`` is within 1% (relative) of ``target``.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError); restored ``source``/``target`` as the body reads them.
    """
    return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def lowerCAmelCase__(__snake_case ) -> Tuple:
    """Integration test: run ``datasets``' TestCommand with --save_infos on a
    dataset directory and compare the regenerated DatasetInfos against
    expected feature/split metadata (byte counts compared within 1%).

    NOTE(review): identifiers look machine-mangled — assignment targets are
    throwaway names while later lines read ``test_command``/``dataset_infos``/
    ``expected_dataset_infos``/``result``/``expected``. Left byte-identical.
    """
    lowerCamelCase__ = _TestCommandArgs(dataset=__snake_case ,all_configs=__snake_case ,save_infos=__snake_case )
    lowerCamelCase__ = TestCommand(*__snake_case )
    test_command.run()
    lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
    assert os.path.exists(__snake_case )
    lowerCamelCase__ = DatasetInfosDict.from_directory(__snake_case )
    lowerCamelCase__ = DatasetInfosDict(
        {
            '''default''': DatasetInfo(
                features=Features(
                    {
                        '''tokens''': Sequence(Value('''string''' ) ),
                        '''ner_tags''': Sequence(
                            ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
                        '''langs''': Sequence(Value('''string''' ) ),
                        '''spans''': Sequence(Value('''string''' ) ),
                    } ) ,splits=[
                    {
                        '''name''': '''train''',
                        '''num_bytes''': 2351563,
                        '''num_examples''': 10000,
                    },
                    {
                        '''name''': '''validation''',
                        '''num_bytes''': 238418,
                        '''num_examples''': 1000,
                    },
                ] ,download_size=3940680 ,dataset_size=2589981 ,)
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        lowerCamelCase__ , lowerCamelCase__ = getattr(dataset_infos['''default'''] ,__snake_case ), getattr(expected_dataset_infos['''default'''] ,__snake_case )
        if key == "num_bytes":
            assert is_apercent_close(__snake_case ,__snake_case )
        elif key == "splits":
            assert list(__snake_case ) == list(__snake_case )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
        else:
            # NOTE(review): this comparison is a no-op (result not asserted) —
            # presumably it was meant to be ``assert result == expected``.
            result == expected
| 29
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger. NOTE(review): assigned to the obfuscated name ``_a``
# while the class below reads ``logger`` — presumably the original binding.
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    """Configuration composing an encoder config and a decoder config
    (``EncoderDecoderConfig`` style); base class is an obfuscated name,
    presumably ``PretrainedConfig`` — TODO confirm.

    NOTE(review): assignment targets are machine-mangled throughout — later
    lines read ``encoder_config``/``decoder_config``/``output`` that are
    never bound, and the class attributes (model_type / is_composition)
    clobber each other under one name. Left byte-identical.
    """
    lowerCAmelCase_ = """encoder-decoder"""
    lowerCAmelCase_ = True
    def __init__( self , **__lowerCAmelCase ):
        """Build encoder/decoder sub-configs from the ``encoder`` and
        ``decoder`` kwargs via AutoConfig."""
        super().__init__(**__lowerCAmelCase )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        lowerCamelCase__ = kwargs.pop('''encoder''' )
        lowerCamelCase__ = encoder_config.pop('''model_type''' )
        lowerCamelCase__ = kwargs.pop('''decoder''' )
        lowerCamelCase__ = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        lowerCamelCase__ = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
        lowerCamelCase__ = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
        lowerCamelCase__ = True
    @classmethod
    def __lowerCamelCase ( cls , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
        """Alternate constructor from two existing configs, forcing the
        decoder into cross-attention mode (presumably
        ``from_encoder_decoder_configs``)."""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Serialize to a plain dict, expanding the nested sub-configs
        (presumably ``to_dict``)."""
        lowerCamelCase__ = copy.deepcopy(self.__dict__ )
        lowerCamelCase__ = self.encoder.to_dict()
        lowerCamelCase__ = self.decoder.to_dict()
        lowerCamelCase__ = self.__class__.model_type
        return output
| 29
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __A :
    """Test helper (``TFEsmModelTester`` style): builds small ESM configs and
    random inputs, and runs shape checks for the TF ESM model classes.

    NOTE(review): assignment targets are machine-mangled — later lines read
    attributes such as ``self.batch_size`` that the visible assignments never
    bind; code left byte-identical.
    """
    def __init__( self , __lowerCAmelCase , ):
        """Store the parent test case and the fixed small-model hyperparameters."""
        lowerCamelCase__ = parent
        lowerCamelCase__ = 1_3
        lowerCamelCase__ = 7
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        lowerCamelCase__ = 9_9
        lowerCamelCase__ = 3_2
        lowerCamelCase__ = 2
        lowerCamelCase__ = 4
        lowerCamelCase__ = 3_7
        lowerCamelCase__ = '''gelu'''
        lowerCamelCase__ = 0.1
        lowerCamelCase__ = 0.1
        lowerCamelCase__ = 5_1_2
        lowerCamelCase__ = 1_6
        lowerCamelCase__ = 2
        lowerCamelCase__ = 0.02
        lowerCamelCase__ = 3
        lowerCamelCase__ = 4
        lowerCamelCase__ = None
    def __lowerCamelCase ( self ):
        """Create a config plus random ids/masks/labels (presumably
        ``prepare_config_and_inputs``)."""
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ = None
        if self.use_input_mask:
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowerCamelCase ( self ):
        """Like prepare_config_and_inputs but also builds encoder hidden states
        and attention mask for the decoder case."""
        (
        (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) ,
        ) = self.prepare_config_and_inputs()
        lowerCamelCase__ = True
        lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Run TFEsmModel with dict/list/positional inputs and check the
        output shape."""
        lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = [input_ids, input_mask]
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
        """Run TFEsmModel as a decoder with cross-attention inputs and check
        the output shape."""
        lowerCamelCase__ = True
        lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
        lowerCamelCase__ = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = [input_ids, input_mask]
        lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase )
        # Also check the case where encoder outputs are not passed
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Run TFEsmForMaskedLM and check the logits shape."""
        lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase )
        lowerCamelCase__ = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Run TFEsmForTokenClassification and check the logits shape."""
        lowerCamelCase__ = self.num_labels
        lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __lowerCamelCase ( self ):
        """Return (config, inputs_dict) for the common model tests (presumably
        ``prepare_config_and_inputs_for_common``)."""
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
        (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) , (
        lowerCamelCase__
        ) ,
        ) = config_and_inputs
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Model test suite for the TF ESM classes (``TFEsmModelTest`` style).

    NOTE(review): the two mixin base classes are the same (undefined)
    obfuscated name — presumably TFModelTesterMixin and PipelineTesterMixin —
    and the class attributes all clobber one obfuscated name. Code left
    byte-identical.
    """
    lowerCAmelCase_ = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    lowerCAmelCase_ = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    def __lowerCamelCase ( self ):
        """Set up the model tester and the shared ConfigTester (presumably
        ``setUp``)."""
        lowerCamelCase__ = TFEsmModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
    def __lowerCamelCase ( self ):
        """Run the common EsmConfig tests."""
        self.config_tester.run_common_tests()
    def __lowerCamelCase ( self ):
        """Shape-check the base model."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Shape-check the model in decoder (cross-attention) mode."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Shape-check the masked-LM head."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Shape-check the token-classification head."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowerCamelCase ( self ):
        """Skipped: ESM does not support resizing input embeddings."""
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowerCamelCase ( self ):
        """Skipped: ESM does not support resizing output embeddings."""
        pass
    def __lowerCamelCase ( self ):
        """Check input embeddings are Keras layers and that the MLM bias dict
        (or its absence) has the expected types."""
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(__lowerCAmelCase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                lowerCamelCase__ = model.get_bias()
                assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
                for k, v in name.items():
                    assert isinstance(__lowerCAmelCase , tf.Variable )
            else:
                lowerCamelCase__ = model.get_output_embeddings()
                assert x is None
                lowerCamelCase__ = model.get_bias()
                assert name is None
@require_tf
class __A ( unittest.TestCase ):
    """Slow integration tests: run the real ``facebook/esm2_t6_8M_UR50D``
    checkpoint and compare a 3x3 output slice against hard-coded values."""
    @slow
    def __lowerCamelCase ( self ):
        """MLM head: verify logits shape (1, 6, 33) and a logits slice."""
        lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        # expected shape: (batch=1, seq_len=6, vocab=33)
        lowerCamelCase__ = [1, 6, 3_3]
        self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase )
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
    @slow
    def __lowerCamelCase ( self ):
        """Base model: verify the last-hidden-state slice on a short sequence."""
        lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
    """Build a FocalNetConfig for the checkpoint named ``__snake_case``.

    Fixes over the mangled original:
    - the body read locals (``idalabel`` etc.) that were never bound because
      every assignment was renamed to the same placeholder; names restored.
    - the embed_dim chain checked ``"large"`` before ``"xlarge"``; since
      "large" is a substring of "xlarge", xlarge models wrongly got 192.
      The FocalNet-XL embed dim is 256, so "xlarge" is now tested first.

    Raises: whatever ``hf_hub_download`` raises on network/repo errors.
    """
    model_name = __snake_case
    # Tiny uses shallow stages; every other variant uses [2, 2, 18, 2].
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        # fl3/fl4 suffixes select the focal level / window configuration.
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        # "lrf" (large receptive field) checkpoints use one extra focal level.
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    # Embedding dimension per variant; "xlarge" MUST be tested before "large"
    # because "large" is a substring of "xlarge".
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "large" in model_name:
        embed_dim = 192
    elif "huge" in model_name:
        embed_dim = 352

    # set label information: 22k labels for the large/huge checkpoints.
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        idalabel=idalabel,
        labelaid=labelaid,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
if "patch_embed.proj" in name:
lowerCamelCase__ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase__ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase__ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase__ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase__ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase__ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase__ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase__ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase__ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase__ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase__ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase__ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase__ = '''focalnet.''' + name
return name
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ) -> Optional[int]:
    """Download a FocalNet checkpoint, convert it to HF format and verify it.

    NOTE(review): the mangling collapsed the three parameters (presumably
    model_name, pytorch_dump_folder_path, push_to_hub) into one duplicated
    name ``__snake_case`` -- a SyntaxError as written -- and the body reads
    locals (model_name, state_dict, val, inputs, ...) that are never bound.
    The original names must be restored before this can run.
    """
    # Map of supported model names to their original checkpoint URLs.
    lowerCamelCase__ = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    lowerCamelCase__ = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' ,__snake_case )
    # Fetch the original state dict (network I/O).
    lowerCamelCase__ = torch.hub.load_state_dict_from_url(__snake_case ,map_location='''cpu''' )['''model''']
    # rename keys
    # NOTE(review): originally this popped each key and re-inserted it under
    # rename_key(key); the mangled rewrite lost both names.
    for key in state_dict.copy().keys():
        lowerCamelCase__ = state_dict.pop(__snake_case )
        lowerCamelCase__ = val
    lowerCamelCase__ = get_focalnet_config(__snake_case )
    lowerCamelCase__ = FocalNetForImageClassification(__snake_case )
    model.eval()
    # load state dict
    model.load_state_dict(__snake_case )
    # verify conversion
    lowerCamelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # Image processor mirrors the torchvision pipeline built below.
    lowerCamelCase__ = BitImageProcessor(
        do_resize=__snake_case ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=__snake_case ,crop_size=224 ,do_normalize=__snake_case ,image_mean=__snake_case ,image_std=__snake_case ,)
    lowerCamelCase__ = Image.open(requests.get(__snake_case ,stream=__snake_case ).raw )
    lowerCamelCase__ = processor(images=__snake_case ,return_tensors='''pt''' )
    lowerCamelCase__ = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] ,std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    lowerCamelCase__ = image_transforms(__snake_case ).unsqueeze(0 )
    # verify pixel_values
    # NOTE(review): presumably compares processor output against the
    # torchvision reference pixel values -- the second argument got mangled.
    assert torch.allclose(inputs.pixel_values ,__snake_case ,atol=1E-4 )
    lowerCamelCase__ = model(**__snake_case )
    lowerCamelCase__ = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
    print('''First values of logits:''' ,outputs.logits[0, :3] )
    # Hard-coded reference logits per model name.
    if model_name == "focalnet-tiny":
        lowerCamelCase__ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        lowerCamelCase__ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        lowerCamelCase__ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        lowerCamelCase__ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        lowerCamelCase__ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        lowerCamelCase__ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] ,__snake_case ,atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(__snake_case )
        processor.save_pretrained(__snake_case )
    if push_to_hub:
        print(F'Pushing model and processor of {model_name} to the hub...' )
        model.push_to_hub(F'{model_name}' )
        processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
    # CLI entry point for the FocalNet conversion script.
    # NOTE(review): the mangling renamed the parser variable to ``_a`` while
    # later lines still reference ``parser``/``args``, and the function called
    # below was renamed away from ``convert_focalnet_checkpoint`` -- restore
    # consistent names before running.
    _a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    _a = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 29
|
from math import sqrt
def lowerCAmelCase__(__snake_case ) -> bool:
    """Primality test by trial division up to sqrt(number).

    :param __snake_case: non-negative integer to test.
    :return: True iff the input is prime.
    :raises AssertionError: for non-int or negative input.

    The mangled original read undefined names (``number``, ``status``) and
    even used the argument as the type in ``isinstance``; names restored.
    """
    number = __snake_case
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCamelCase__ = list(range(2 ,n + 1 ) )
lowerCamelCase__ = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 ,len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCamelCase__ = 0
# filters actual prime numbers.
lowerCamelCase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case ,__snake_case ), "'ans' must been from type list"
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
    """Return all primes in [2, n] by testing each candidate.

    NOTE(review): depends on the sibling ``is_prime`` helper, which this
    mangled file no longer defines under that name -- confirm the original
    module layout before running.
    """
    n = __snake_case
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowerCAmelCase__(__snake_case ) -> List[str]:
    """Prime factorization of a non-negative integer (with multiplicity).

    0 and 1 map to [0] and [1] respectively; primes map to [number].
    NOTE(review): depends on the sibling ``is_prime`` helper, which this
    mangled file no longer defines under that name.
    """
    number = __snake_case
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowerCAmelCase__(__snake_case ) -> List[Any]:
    """Largest prime factor of a non-negative integer.

    NOTE(review): depends on the sibling prime-factorization helper, which
    this mangled file no longer defines under its original name.
    """
    number = __snake_case
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def lowerCAmelCase__(__snake_case ) -> Dict:
    """Smallest prime factor of a non-negative integer.

    NOTE(review): depends on the sibling prime-factorization helper, which
    this mangled file no longer defines under its original name.
    """
    number = __snake_case
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0 ,__snake_case ), "compare bust been from type bool"
return number % 2 == 0
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ), "'number' must been an int"
assert isinstance(number % 2 != 0 ,__snake_case ), "compare bust been from type bool"
return number % 2 != 0
def lowerCAmelCase__(__snake_case ) -> List[Any]:
    """Goldbach decomposition: two primes whose sum is the given even number.

    :param __snake_case: even int > 2.
    :return: [p, q] with p + q == number, both prime.

    NOTE(review): depends on sibling helpers (``is_even``,
    ``get_prime_numbers``, ``is_prime``) which this mangled file no longer
    defines under those names.
    """
    number = __snake_case
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowerCAmelCase__(number_a, number_b) -> str:
    """Greatest common divisor via the Euclidean algorithm.

    The mangled original duplicated the parameter name ``__snake_case``
    (a SyntaxError) and read the undefined local ``numbera``; distinct
    parameter names restored (positional call compatibility preserved).

    :return: gcd of the two non-negative integers.
    """
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a, int) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def lowerCAmelCase__(number_a, number_b) -> Any:
    """Least common multiple (kgV) of two positive ints via prime factorizations.

    The mangled original duplicated the parameter name (a SyntaxError) and
    read undefined locals; coherent names restored.
    NOTE(review): depends on the sibling prime-factorization helper, which
    this mangled file no longer defines under its original name.
    """
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_a > 1 and number_b > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a)
        prime_fac_b = prime_factorization(number_b)
    elif number_a == 1 or number_b == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a, number_b)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                # shared factor: take it with the larger multiplicity
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
    """Return the n-th prime number (n == 0 gives 2).

    NOTE(review): depends on the sibling ``is_prime`` helper, which this
    mangled file no longer defines under that name.
    """
    n = __snake_case
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans), "'ans' must been a prime number and from type int"
    return ans
def lowerCAmelCase__(p_number_a, p_number_b) -> Dict:
    """All primes strictly between two given primes.

    The mangled original duplicated the parameter name (a SyntaxError);
    distinct names restored (positional compatibility preserved).
    NOTE(review): depends on the sibling ``is_prime`` helper, which this
    mangled file no longer defines under that name.
    """
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 1), "'n' must been int and >= 1"
lowerCamelCase__ = [] # will be returned.
for divisor in range(1 ,n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
    """True iff the number equals the sum of its proper divisors.

    NOTE(review): depends on the sibling divisors helper, which this mangled
    file no longer defines under its original name.
    """
    number = __snake_case
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def lowerCAmelCase__(numerator, denominator) -> Tuple:
    """Reduce a fraction to lowest terms; returns (numerator, denominator).

    The mangled original duplicated the parameter name (a SyntaxError);
    distinct names restored (positional compatibility preserved).
    NOTE(review): depends on the sibling ``gcd`` helper, which this mangled
    file no longer defines under that name.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase__(__snake_case ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been a int and >= 0"
lowerCamelCase__ = 1 # this will be return.
for factor in range(1 ,n + 1 ):
ans *= factor
return ans
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case ,__snake_case ) and (n >= 0), "'n' must been an int and >= 0"
lowerCamelCase__ = 0
lowerCamelCase__ = 1
lowerCamelCase__ = 1 # this will be return
for _ in range(n - 1 ):
lowerCamelCase__ = ans
ans += fiba
lowerCamelCase__ = tmp
return ans
| 29
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __A ( lowerCAmelCase ):
    """Flax UNet conditional output container.

    NOTE(review): the single field was mangled -- originally presumably
    ``sample: jnp.ndarray`` (the type annotation collapsed to ``42``);
    confirm against the original file."""
    lowerCAmelCase_ = 42
@flax_register_to_config
class __A ( nn.Module , lowerCAmelCase , lowerCAmelCase ):
    """Flax conditional 2D UNet (down blocks -> cross-attn mid block -> up blocks).

    NOTE(review): field names were mangled to ``lowerCAmelCase_``; the comments
    below recover them from the reads in ``setup``/``__call__``.
    """
    lowerCAmelCase_ = 32  # sample_size (read in init below)
    lowerCAmelCase_ = 4  # in_channels
    lowerCAmelCase_ = 4  # out_channels (used by conv_out)
    # down_block_types (iterated in setup)
    lowerCAmelCase_ = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    lowerCAmelCase_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")  # up_block_types
    lowerCAmelCase_ = False  # only_cross_attention
    lowerCAmelCase_ = (320, 640, 1280, 1280)  # block_out_channels
    lowerCAmelCase_ = 2  # layers_per_block
    lowerCAmelCase_ = 8  # attention_head_dim
    lowerCAmelCase_ = None  # num_attention_heads (must stay None; see setup)
    lowerCAmelCase_ = 1280  # cross_attention_dim
    lowerCAmelCase_ = 0.0  # dropout
    lowerCAmelCase_ = False  # use_linear_projection
    lowerCAmelCase_ = jnp.floataa  # dtype
    lowerCAmelCase_ = True  # flip_sin_to_cos
    lowerCAmelCase_ = 0  # freq_shift
    lowerCAmelCase_ = False  # use_memory_efficient_attention
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Initialize module parameters from dummy zero inputs (init_weights)."""
        lowerCamelCase__ = (1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ = jnp.zeros(__lowerCAmelCase , dtype=jnp.floataa )
        lowerCamelCase__ = jnp.ones((1,) , dtype=jnp.intaa )
        lowerCamelCase__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        lowerCamelCase__ , lowerCamelCase__ = jax.random.split(__lowerCAmelCase )
        lowerCamelCase__ = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )["params"]
    def __lowerCamelCase ( self ):
        """Build all sub-modules: conv_in, time embedding, down/mid/up blocks, conv_out."""
        lowerCamelCase__ = self.block_out_channels
        lowerCamelCase__ = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase__ = self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase__ = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        lowerCamelCase__ = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        lowerCamelCase__ = FlaxTimestepEmbedding(__lowerCAmelCase , dtype=self.dtype )
        lowerCamelCase__ = self.only_cross_attention
        # broadcast scalar settings to one entry per down block
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            lowerCamelCase__ = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            lowerCamelCase__ = (num_attention_heads,) * len(self.down_block_types )
        # down
        lowerCamelCase__ = []
        lowerCamelCase__ = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ = output_channel
            lowerCamelCase__ = block_out_channels[i]
            lowerCamelCase__ = i == len(__lowerCAmelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ = FlaxCrossAttnDownBlockaD(
                    in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                lowerCamelCase__ = FlaxDownBlockaD(
                    in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(__lowerCAmelCase )
        lowerCamelCase__ = down_blocks
        # mid
        lowerCamelCase__ = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        lowerCamelCase__ = []
        lowerCamelCase__ = list(reversed(__lowerCAmelCase ) )
        lowerCamelCase__ = list(reversed(__lowerCAmelCase ) )
        lowerCamelCase__ = list(reversed(__lowerCAmelCase ) )
        lowerCamelCase__ = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            lowerCamelCase__ = output_channel
            lowerCamelCase__ = reversed_block_out_channels[i]
            lowerCamelCase__ = reversed_block_out_channels[min(i + 1 , len(__lowerCAmelCase ) - 1 )]
            lowerCamelCase__ = i == len(__lowerCAmelCase ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                lowerCamelCase__ = FlaxCrossAttnUpBlockaD(
                    in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , prev_output_channel=__lowerCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                lowerCamelCase__ = FlaxUpBlockaD(
                    in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , prev_output_channel=__lowerCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(__lowerCAmelCase )
            lowerCamelCase__ = output_channel
        lowerCamelCase__ = up_blocks
        # out
        lowerCamelCase__ = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
        lowerCamelCase__ = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = True , __lowerCAmelCase = False , ):
        """Run the UNet forward pass (sample, timesteps, encoder_hidden_states,
        optional ControlNet-style additional residuals); NCHW in/out with an
        internal NHWC transpose for the Flax convolutions."""
        # 1. time: promote a scalar timestep to a batch of one
        if not isinstance(__lowerCAmelCase , jnp.ndarray ):
            lowerCamelCase__ = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(__lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ = timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ = jnp.expand_dims(__lowerCAmelCase , 0 )
        lowerCamelCase__ = self.time_proj(__lowerCAmelCase )
        lowerCamelCase__ = self.time_embedding(__lowerCAmelCase )
        # 2. pre-process
        lowerCamelCase__ = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1) )
        lowerCamelCase__ = self.conv_in(__lowerCAmelCase )
        # 3. down
        lowerCamelCase__ = (sample,)
        for down_block in self.down_blocks:
            if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                lowerCamelCase__ , lowerCamelCase__ = down_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ = down_block(__lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            # merge ControlNet residuals into the skip connections
            lowerCamelCase__ = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                __lowerCAmelCase , __lowerCAmelCase ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            lowerCamelCase__ = new_down_block_res_samples
        # 4. mid
        lowerCamelCase__ = self.mid_block(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            lowerCamelCase__ = down_block_res_samples[-(self.layers_per_block + 1) :]
            lowerCamelCase__ = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                lowerCamelCase__ = up_block(
                    __lowerCAmelCase , temb=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , res_hidden_states_tuple=__lowerCAmelCase , deterministic=not train , )
            else:
                lowerCamelCase__ = up_block(__lowerCAmelCase , temb=__lowerCAmelCase , res_hidden_states_tuple=__lowerCAmelCase , deterministic=not train )
        # 6. post-process
        lowerCamelCase__ = self.conv_norm_out(__lowerCAmelCase )
        lowerCamelCase__ = nn.silu(__lowerCAmelCase )
        lowerCamelCase__ = self.conv_out(__lowerCAmelCase )
        lowerCamelCase__ = jnp.transpose(__lowerCAmelCase , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=__lowerCAmelCase )
| 29
|
from __future__ import annotations
def lowerCAmelCase__(sequence, start=None, end=None) -> None:
    """In-place slowsort of ``sequence[start:end + 1]`` (recursive, O(n^log n)).

    The mangled original duplicated all three parameter names (a SyntaxError)
    and recursed via the undefined name ``slowsort``; parameters restored and
    recursion rebound to this function (positional compatibility preserved).

    :param sequence: mutable sequence to sort in place.
    :param start: first index of the range (defaults to 0).
    :param end: last index of the range (defaults to len(sequence) - 1).
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # sort both halves, then fix up the largest element at `end`
    lowerCAmelCase__(sequence, start, mid)
    lowerCAmelCase__(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    lowerCAmelCase__(sequence, start, end - 1)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 29
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __A ( unittest.TestCase ):
    """Tests for the Flax generation logits warpers/processors (temperature,
    top-k, top-p, min-length, forced BOS/EOS), each in isolation and then
    composed through ``FlaxLogitsProcessorList``, both eagerly and under
    ``jax.jit``.

    NOTE(review): this chunk is machine-obfuscated. Every helper/test method
    is named ``__lowerCamelCase`` (later defs shadow earlier ones), several
    signatures repeat the parameter name ``__lowerCAmelCase`` (a SyntaxError
    in Python), and local assignments target the throwaway name
    ``lowerCamelCase__`` while later lines read the intended names
    (``scores``, ``batch_size``, ``probs``, ...). Methods also call
    ``self._get_uniform_logits``, which is never defined under that name
    here. The readable identifiers must be restored from the upstream test
    file before this class can run — TODO confirm against upstream.
    """
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Presumably ``_get_uniform_logits(batch_size, length)``: a uniform
        distribution over ``length`` tokens for each batch row."""
        lowerCamelCase__ = jnp.ones((batch_size, length) ) / length
        return scores
    def __lowerCamelCase ( self ):
        """Temperature warping: <1 sharpens the distribution, >1 smooths it;
        a uniform row must stay uniform either way."""
        lowerCamelCase__ = None
        lowerCamelCase__ = 2_0
        lowerCamelCase__ = self._get_uniform_logits(batch_size=2 , length=__lowerCAmelCase )
        # tweak scores to not be uniform anymore
        lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
        lowerCamelCase__ = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
        # compute softmax
        lowerCamelCase__ = jax.nn.softmax(__lowerCAmelCase , axis=-1 )
        lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
        lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(__lowerCAmelCase , scores.copy() , cur_len=__lowerCAmelCase ) , axis=-1 )
        lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(__lowerCAmelCase , scores.copy() , cur_len=__lowerCAmelCase ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def __lowerCamelCase ( self ):
        """Top-k warping: only the k best tokens survive (rest set to -inf),
        and ``min_tokens_to_keep`` overrides a smaller k."""
        lowerCamelCase__ = None
        lowerCamelCase__ = 1_0
        lowerCamelCase__ = 2
        # create ramp distribution
        lowerCamelCase__ = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
        lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
        lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
        lowerCamelCase__ = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        lowerCamelCase__ = 5
        lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        lowerCamelCase__ = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
        lowerCamelCase__ = top_k_warp_safety_check(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def __lowerCamelCase ( self ):
        """Top-p (nucleus) warping: keep the smallest token set whose
        probability mass reaches p; ``min_tokens_to_keep`` is a floor."""
        lowerCamelCase__ = None
        lowerCamelCase__ = 1_0
        lowerCamelCase__ = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
        lowerCamelCase__ = np.exp(top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        lowerCamelCase__ = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        lowerCamelCase__ = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        lowerCamelCase__ = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def __lowerCamelCase ( self ):
        """Min-length processor: forces -inf on EOS until ``min_length`` is
        reached, then leaves scores untouched."""
        lowerCamelCase__ = 2_0
        lowerCamelCase__ = 4
        lowerCamelCase__ = 0
        lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__lowerCAmelCase )
        # check that min length is applied at length 5
        lowerCamelCase__ = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
        lowerCamelCase__ = 5
        lowerCamelCase__ = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = min_dist_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
        # check that min length is not applied anymore at length 15
        lowerCamelCase__ = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = 1_5
        lowerCamelCase__ = min_dist_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() )
    def __lowerCamelCase ( self ):
        """Forced-BOS processor: at cur_len == 1 every token except BOS is
        -inf; afterwards the scores pass through unchanged."""
        lowerCamelCase__ = 2_0
        lowerCamelCase__ = 4
        lowerCamelCase__ = 0
        lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase )
        # check that all scores are -inf except the bos_token_id score
        lowerCamelCase__ = ids_tensor((batch_size, 1) , vocab_size=2_0 )
        lowerCamelCase__ = 1
        lowerCamelCase__ = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
        # check that bos_token_id is not forced if current length is greater than 1
        lowerCamelCase__ = 3
        lowerCamelCase__ = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() )
    def __lowerCamelCase ( self ):
        """Forced-EOS processor: at ``max_length - 1`` every token except EOS
        is -inf; before that the scores pass through unchanged."""
        lowerCamelCase__ = 2_0
        lowerCamelCase__ = 4
        lowerCamelCase__ = 0
        lowerCamelCase__ = 5
        lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        lowerCamelCase__ = ids_tensor((batch_size, 4) , vocab_size=2_0 )
        lowerCamelCase__ = 4
        lowerCamelCase__ = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        lowerCamelCase__ = 3
        lowerCamelCase__ = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() )
    def __lowerCamelCase ( self ):
        """Applying all warpers/processors individually must equal applying
        them through one ``FlaxLogitsProcessorList``; input_ids untouched."""
        lowerCamelCase__ = 4
        lowerCamelCase__ = 1_0
        lowerCamelCase__ = 1_5
        lowerCamelCase__ = 2
        lowerCamelCase__ = 1
        lowerCamelCase__ = 1_5
        # dummy input_ids and scores
        lowerCamelCase__ = ids_tensor((batch_size, sequence_length) , __lowerCAmelCase )
        lowerCamelCase__ = input_ids.copy()
        lowerCamelCase__ = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = scores.copy()
        # instantiate all dist processors
        lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
        lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__lowerCAmelCase )
        lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase )
        lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
        lowerCamelCase__ = 1_0
        # no processor list
        lowerCamelCase__ = temp_dist_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        lowerCamelCase__ = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        lowerCamelCase__ = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        lowerCamelCase__ = min_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        lowerCamelCase__ = bos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        lowerCamelCase__ = eos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        # with processor list
        lowerCamelCase__ = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        lowerCamelCase__ = processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
        # scores should be equal
        self.assertTrue(jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def __lowerCamelCase ( self ):
        """Same equivalence as above, but with both pipelines compiled under
        ``jax.jit`` (cur_len passed as a traced argument)."""
        lowerCamelCase__ = 4
        lowerCamelCase__ = 1_0
        lowerCamelCase__ = 1_5
        lowerCamelCase__ = 2
        lowerCamelCase__ = 1
        lowerCamelCase__ = 1_5
        # dummy input_ids and scores
        lowerCamelCase__ = ids_tensor((batch_size, sequence_length) , __lowerCAmelCase )
        lowerCamelCase__ = input_ids.copy()
        lowerCamelCase__ = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = scores.copy()
        # instantiate all dist processors
        lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
        lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__lowerCAmelCase )
        lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase )
        lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
        lowerCamelCase__ = 1_0
        # no processor list
        def run_no_processor_list(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
            lowerCamelCase__ = temp_dist_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
            lowerCamelCase__ = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
            lowerCamelCase__ = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
            lowerCamelCase__ = min_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
            lowerCamelCase__ = bos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
            lowerCamelCase__ = eos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
            return scores
        # with processor list
        def run_processor_list(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
            lowerCamelCase__ = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            lowerCamelCase__ = processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
            return scores
        lowerCamelCase__ = jax.jit(__lowerCAmelCase )
        lowerCamelCase__ = jax.jit(__lowerCAmelCase )
        lowerCamelCase__ = jitted_run_no_processor_list(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = jitted_run_processor_list(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # scores should be equal
        self.assertTrue(jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29
|
from __future__ import annotations
def lowerCAmelCase__(principal, daily_interest_rate, days_between_payments) -> float:
    """Return the simple interest accrued on ``principal``.

    Args:
        principal: amount lent out; must be > 0.
        daily_interest_rate: interest rate per day; must be >= 0.
        days_between_payments: number of days interest accrues; must be > 0.

    Raises:
        ValueError: if any argument is outside its valid range.

    Fix over the original: the three parameters all shared one name (a
    SyntaxError); names are restored from the validation messages in the body.
    """
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    # simple interest = P * r * t
    return principal * daily_interest_rate * days_between_payments
def lowerCAmelCase__(principal, nominal_annual_interest_rate_percentage, number_of_compounding_periods) -> float:
    """Return the compound interest (interest only, excluding principal).

    Args:
        principal: amount invested; must be > 0.
        nominal_annual_interest_rate_percentage: per-period rate; must be >= 0.
        number_of_compounding_periods: number of periods; must be > 0.

    Raises:
        ValueError: if any argument is outside its valid range.

    Fix over the original: the three parameters all shared one name (a
    SyntaxError); names are restored from the validation messages in the body.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    # interest = P * ((1 + r)^n - 1)
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def lowerCAmelCase__(principal, nominal_annual_percentage_rate, number_of_years) -> float:
    """Return the interest accrued with daily compounding over whole years
    (APR converted to a daily rate over ``number_of_years * 365`` periods).

    Args:
        principal: amount invested; must be > 0.
        nominal_annual_percentage_rate: annual rate (APR); must be >= 0.
        number_of_years: investment horizon in years; must be > 0.

    Raises:
        ValueError: if any argument is outside its valid range.

    Fixes over the original: duplicated parameter names (a SyntaxError), and
    a delegation to an undefined helper named ``compound_interest`` — the
    compound-interest formula is inlined here so the function is
    self-contained.
    """
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    # Daily compounding: P * ((1 + APR/365)^(years*365) - 1)
    return principal * (
        (1 + nominal_annual_percentage_rate / 365) ** (number_of_years * 365)
        - 1
    )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # NOTE(review): no doctest examples are visible in this chunk.
    import doctest
    doctest.testmod()
| 29
| 1
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCAmelCase__(module) -> None:
    """Freeze every parameter of ``module`` so it is skipped by the optimizer
    and excluded from backpropagation.

    Fix over the original: the loop body assigned ``False`` to a throwaway
    local, which had no effect; setting ``requires_grad`` on each parameter is
    the intended operation. (The original's ``-> Dict`` annotation was also
    wrong — nothing is returned.)
    """
    for param in module.parameters():
        param.requires_grad = False
def lowerCAmelCase__() -> str:
    """Pick the best available torch device name.

    Returns:
        ``"cuda"`` when CUDA is available, else ``"mps"`` when the MPS backend
        is available and built (with a warning about its reliability), else
        ``"cpu"``.

    Fix over the original: the candidate device was assigned to a throwaway
    local while the later lines read the name ``device``, which raised
    NameError; the local is now named consistently.
    """
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def lowerCAmelCase__(__snake_case ) -> List[Any]:
    """Display an image with matplotlib, with both axes hidden.

    NOTE(review): this body is obfuscation-damaged. ``plt.imshow``'s result is
    bound to a throwaway local while the next lines read ``fig`` (undefined
    here), and ``set_visible`` is passed the input image rather than what was
    presumably ``False`` — TODO confirm both against the original script.
    """
    lowerCamelCase__ = plt.imshow(__snake_case )
    fig.axes.get_xaxis().set_visible(__snake_case )
    fig.axes.get_yaxis().set_visible(__snake_case )
    plt.show()
def lowerCAmelCase__() -> str:
    """Return the current local time formatted as ``HH:MM:SS``.

    Fix over the original: both locals were assigned to a throwaway name while
    the later lines read ``current_time`` and ``timestamp``, which raised
    NameError; the locals are now named consistently.
    """
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
| 29
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCAmelCase__(func):
    """Decorator for benchmarking: the wrapped callable returns the elapsed
    wall-clock time in seconds; ``func``'s own return value is discarded.

    Fixes over the original: the inner wrapper repeated one parameter name for
    ``*args``/``**kwargs`` (a SyntaxError), and ``func.__name__`` was assigned
    to a dead local instead of onto the wrapper.
    """
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    # Preserve the wrapped function's name for benchmark reporting.
    wrapper.__name__ = func.__name__
    return wrapper
def lowerCAmelCase__(features, num_examples=100, seq_shapes=None) -> list:
    """Generate ``num_examples`` rows of random dummy data matching ``features``.

    Args:
        features: ``datasets.Features``-like mapping of column name -> feature type.
        num_examples: number of examples to generate.
        seq_shapes: mapping of column name -> array shape, consulted for
            ``datasets.Sequence`` columns (required for those columns).

    Returns:
        List of ``(index, example_dict)`` tuples.

    Fix over the original: the three parameters all shared one name (a
    SyntaxError) and the locals were assigned to a throwaway name while later
    lines read ``features``/``seq_shapes``/``dummy_data``; names are restored
    from those reads.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for k, v in features.items():
            if isinstance(v, _ArrayXD):
                # Fixed-shape array feature: random floats cast to the declared dtype.
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence features down to the innermost value type.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data
def lowerCAmelCase__(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write a dummy Arrow dataset to ``dataset_path`` and load it back.

    Args:
        dataset_path: file path the Arrow data is written to.
        features: ``datasets.Features`` describing the columns.
        num_examples: number of dummy rows to write.
        seq_shapes: shapes for ``Sequence`` columns (passed through to the
            example generator).

    Returns:
        The ``datasets.Dataset`` loaded from the written file.

    Raises:
        ValueError: if the writer reports a different number of examples than
            requested.

    Fix over the original: duplicated parameter names (a SyntaxError) and
    throwaway local assignments; names are restored from the reads in the
    body. NOTE(review): ``generate_examples`` is expected at module level —
    in this chunk that helper's readable name was lost to obfuscation.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 29
| 1
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
# NOTE(review): the three module constants below were all renamed to `_a` by
# obfuscation, so at import time each rebinding clobbers the previous one;
# the readable names (presumably a tokenizer placeholder, an
# intermediate-size map, and a shard-count map keyed by model size) must be
# restored from the upstream conversion script — TODO confirm.
_a = None
# Feed-forward intermediate sizes per LLaMA model size.
_a = {
    "7B": 11_008,
    "13B": 13_824,
    "30B": 17_920,
    "65B": 22_016,
    "70B": 28_672,
}
# Number of checkpoint shards per model size (the "f" suffix variants share
# the shard count of their base size).
_a = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def lowerCAmelCase__(n, ffn_dim_multiplier=1, multiple_of=256) -> int:
    """Compute the LLaMA feed-forward intermediate size for hidden size ``n``:
    scale ``8n/3`` by ``ffn_dim_multiplier`` and round up to the nearest
    multiple of ``multiple_of``.

    Fix over the original: the three parameters all shared one name (a
    SyntaxError); names are restored from the reads in the return expression.
    """
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def lowerCAmelCase__(path):
    """Read and return the JSON document stored at ``path``.

    Fix over the original: ``json.load`` was passed the path string instead of
    the open file object, which raises at runtime.
    """
    with open(path ,'''r''' ) as f:
        return json.load(f )
def lowerCAmelCase__(text, path) -> None:
    """Serialize ``text`` as JSON to the file at ``path``.

    Fix over the original: the two parameters shared one name (a SyntaxError)
    and ``json.dump`` was given the same object for both the value and the
    file; the payload is now dumped into the open file handle.
    """
    with open(path ,'''w''' ) as f:
        json.dump(text ,f )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case=True ) -> List[Any]:
    """Convert a raw LLaMA checkpoint directory into Transformers format:
    re-shard/permute the attention weights, write per-layer ``.bin`` shards
    plus an index, save a ``LlamaConfig``, then reload and re-save through
    ``LlamaForCausalLM``.

    NOTE(review): this body is obfuscation-damaged and cannot run as written —
    the signature (and the nested ``permute``) repeat a parameter name, which
    is a SyntaxError; locals are assigned to the throwaway ``lowerCamelCase__``
    while later lines read the intended names (``params``, ``n_layers``,
    ``loaded``, ``state_dict``, ...); and literal arguments such as
    ``exist_ok=__snake_case`` presumably stood for ``True``. The readable
    identifiers must be restored from the upstream conversion script — TODO
    confirm each rename against upstream before use.
    """
    os.makedirs(__snake_case ,exist_ok=__snake_case )
    lowerCamelCase__ = os.path.join(__snake_case ,'''tmp''' )
    os.makedirs(__snake_case ,exist_ok=__snake_case )
    lowerCamelCase__ = read_json(os.path.join(__snake_case ,'''params.json''' ) )
    lowerCamelCase__ = NUM_SHARDS[model_size]
    lowerCamelCase__ = params['''n_layers''']
    lowerCamelCase__ = params['''n_heads''']
    lowerCamelCase__ = n_heads // num_shards
    lowerCamelCase__ = params['''dim''']
    lowerCamelCase__ = dim // n_heads
    lowerCamelCase__ = 1_0_0_0_0.0
    lowerCamelCase__ = 1.0 / (base ** (torch.arange(0 ,__snake_case ,2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        lowerCamelCase__ = params['''n_kv_heads'''] # for GQA / MQA
        lowerCamelCase__ = n_heads_per_shard // num_key_value_heads
        lowerCamelCase__ = dim // num_key_value_heads
    else: # compatibility with other checkpoints
        lowerCamelCase__ = n_heads
        lowerCamelCase__ = n_heads_per_shard
        lowerCamelCase__ = dim
    # permute for sliced rotary
    def permute(__snake_case ,__snake_case=n_heads ,__snake_case=dim ,__snake_case=dim ):
        # NOTE(review): reads `w` and `dima`, neither bound under those names
        # here — another obfuscation casualty.
        return w.view(__snake_case ,dima // n_heads // 2 ,2 ,__snake_case ).transpose(1 ,2 ).reshape(__snake_case ,__snake_case )
    print(F'Fetching all parameters from the checkpoint at {input_base_path}.' )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        lowerCamelCase__ = torch.load(os.path.join(__snake_case ,'''consolidated.00.pth''' ) ,map_location='''cpu''' )
    else:
        # Sharded
        lowerCamelCase__ = [
            torch.load(os.path.join(__snake_case ,F'consolidated.{i:02d}.pth' ) ,map_location='''cpu''' )
            for i in range(__snake_case )
        ]
    lowerCamelCase__ = 0
    lowerCamelCase__ = {'''weight_map''': {}}
    # Write one shard file per transformer layer.
    for layer_i in range(__snake_case ):
        lowerCamelCase__ = F'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
        if model_size == "7B":
            # Unsharded
            lowerCamelCase__ = {
                F'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
                    loaded[F'layers.{layer_i}.attention.wq.weight'] ),
                F'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
                    loaded[F'layers.{layer_i}.attention.wk.weight'] ),
                F'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[F'layers.{layer_i}.attention.wv.weight'],
                F'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[F'layers.{layer_i}.attention.wo.weight'],
                F'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w1.weight'],
                F'model.layers.{layer_i}.mlp.down_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w2.weight'],
                F'model.layers.{layer_i}.mlp.up_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w3.weight'],
                F'model.layers.{layer_i}.input_layernorm.weight': loaded[F'layers.{layer_i}.attention_norm.weight'],
                F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[F'layers.{layer_i}.ffn_norm.weight'],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            lowerCamelCase__ = {
                F'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
                    F'layers.{layer_i}.attention_norm.weight'
                ].clone(),
                F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
                    F'layers.{layer_i}.ffn_norm.weight'
                ].clone(),
            }
            lowerCamelCase__ = permute(
                torch.cat(
                    [
                        loaded[i][F'layers.{layer_i}.attention.wq.weight'].view(__snake_case ,__snake_case ,__snake_case )
                        for i in range(__snake_case )
                    ] ,dim=0 ,).reshape(__snake_case ,__snake_case ) )
            lowerCamelCase__ = permute(
                torch.cat(
                    [
                        loaded[i][F'layers.{layer_i}.attention.wk.weight'].view(
                            __snake_case ,__snake_case ,__snake_case )
                        for i in range(__snake_case )
                    ] ,dim=0 ,).reshape(__snake_case ,__snake_case ) ,__snake_case ,__snake_case ,__snake_case ,)
            lowerCamelCase__ = torch.cat(
                [
                    loaded[i][F'layers.{layer_i}.attention.wv.weight'].view(
                        __snake_case ,__snake_case ,__snake_case )
                    for i in range(__snake_case )
                ] ,dim=0 ,).reshape(__snake_case ,__snake_case )
            lowerCamelCase__ = torch.cat(
                [loaded[i][F'layers.{layer_i}.attention.wo.weight'] for i in range(__snake_case )] ,dim=1 )
            lowerCamelCase__ = torch.cat(
                [loaded[i][F'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__snake_case )] ,dim=0 )
            lowerCamelCase__ = torch.cat(
                [loaded[i][F'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__snake_case )] ,dim=1 )
            lowerCamelCase__ = torch.cat(
                [loaded[i][F'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__snake_case )] ,dim=0 )
        lowerCamelCase__ = inv_freq
        for k, v in state_dict.items():
            lowerCamelCase__ = filename
            param_count += v.numel()
        torch.save(__snake_case ,os.path.join(__snake_case ,__snake_case ) )
    # Final shard: embeddings, final norm and LM head.
    lowerCamelCase__ = F'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
    if model_size == "7B":
        # Unsharded
        lowerCamelCase__ = {
            '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
            '''model.norm.weight''': loaded['''norm.weight'''],
            '''lm_head.weight''': loaded['''output.weight'''],
        }
    else:
        lowerCamelCase__ = {
            '''model.norm.weight''': loaded[0]['''norm.weight'''],
            '''model.embed_tokens.weight''': torch.cat(
                [loaded[i]['''tok_embeddings.weight'''] for i in range(__snake_case )] ,dim=1 ),
            '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(__snake_case )] ,dim=0 ),
        }
    for k, v in state_dict.items():
        lowerCamelCase__ = filename
        param_count += v.numel()
    torch.save(__snake_case ,os.path.join(__snake_case ,__snake_case ) )
    # Write configs
    lowerCamelCase__ = {'''total_size''': param_count * 2}
    write_json(__snake_case ,os.path.join(__snake_case ,'''pytorch_model.bin.index.json''' ) )
    lowerCamelCase__ = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    lowerCamelCase__ = params['''multiple_of'''] if '''multiple_of''' in params else 256
    lowerCamelCase__ = LlamaConfig(
        hidden_size=__snake_case ,intermediate_size=compute_intermediate_size(__snake_case ,__snake_case ,__snake_case ) ,num_attention_heads=params['''n_heads'''] ,num_hidden_layers=params['''n_layers'''] ,rms_norm_eps=params['''norm_eps'''] ,num_key_value_heads=__snake_case ,)
    config.save_pretrained(__snake_case )
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print('''Loading the checkpoint in a Llama model.''' )
    lowerCamelCase__ = LlamaForCausalLM.from_pretrained(__snake_case ,torch_dtype=torch.floataa ,low_cpu_mem_usage=__snake_case )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('''Saving in the Transformers format.''' )
    model.save_pretrained(__snake_case ,safe_serialization=__snake_case )
    shutil.rmtree(__snake_case )
def lowerCAmelCase__(tokenizer_path, input_tokenizer_path):
    """Instantiate a LLaMA tokenizer from ``input_tokenizer_path`` and save it
    to ``tokenizer_path``, preferring the fast implementation when it
    imported successfully.

    Fix over the original: the two parameters shared one name (a SyntaxError)
    and the tokenizer instance was bound to a throwaway local while the next
    line read ``tokenizer``; names are restored from those reads.
    """
    # Fall back to the slow tokenizer when the fast one failed to import.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def lowerCAmelCase__() -> Optional[int]:
    """CLI entry point: parse arguments and convert LLaMA checkpoint weights
    (and/or the tokenizer) into the Transformers on-disk format.

    NOTE(review): obfuscation damage — the parser is bound to a throwaway
    local while the next lines read ``parser``, and the calls to
    ``write_model``/``write_tokenizer`` reference functions whose readable
    names were lost in this chunk (both are named ``lowerCAmelCase__`` here);
    restore from the upstream conversion script — TODO confirm.
    """
    lowerCamelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--input_dir''' ,help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' ,)
    parser.add_argument(
        '''--model_size''' ,choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] ,)
    parser.add_argument(
        '''--output_dir''' ,help='''Location to write HF model and tokenizer''' ,)
    parser.add_argument('''--safe_serialization''' ,type=__snake_case ,help='''Whether or not to save using `safetensors`.''' )
    lowerCamelCase__ = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
    # The tokenizer is converted in every mode, including "tokenizer_only".
    lowerCamelCase__ = os.path.join(args.input_dir ,'''tokenizer.model''' )
    write_tokenizer(args.output_dir ,__snake_case )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this chunk (the
    # entry point above is named `lowerCAmelCase__`), so this call would raise
    # NameError as written — confirm against the original script.
    main()
| 29
|
def lowerCAmelCase__(grid) -> int:
    """Return the minimum path sum from the top-left to the bottom-right of
    ``grid``, moving only right or down. The grid is modified in place.

    Raises:
        TypeError: if ``grid`` is empty or its first row is empty.

    Fixes over the original: the parameter was renamed away from ``grid``
    (which the body reads, so every call raised NameError) and the per-row
    work delegated to an undefined helper ``fill_row``; that helper's logic
    is inlined so the function is self-contained.
    """
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    # First row is reachable only by moving right: prefix-sum it.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        # Each cell adds the cheaper of the cell to its left or the one above.
        current_row[0] += row_above[0]
        for cell_n in range(1, len(current_row)):
            current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
        row_above = current_row
    return grid[-1][-1]
def lowerCAmelCase__(current_row, row_above) -> list:
    """Accumulate minimum path costs into ``current_row`` given the already
    accumulated costs in ``row_above`` (both same length); each cell gains the
    cheaper of its left neighbour or the cell above. Mutates and returns
    ``current_row``.

    Fix over the original: the two parameters shared one name (a SyntaxError);
    names are restored from the reads in the body.
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # NOTE(review): no doctest examples are visible in this chunk.
    import doctest
    doctest.testmod()
| 29
| 1
|
def lowerCAmelCase__(arr):
    """Sort ``arr`` in place with stooge sort and return it.

    Fix over the original: the body called an undefined name ``stooge``; the
    recursion now lives in a self-contained nested helper.
    """
    def _stooge(a, i, h):
        # Recursively stooge-sort a[i..h] in place.
        if i >= h:
            return
        if a[i] > a[h]:
            a[i], a[h] = a[h], a[i]
        if h - i + 1 > 2:
            t = (h - i + 1) // 3
            _stooge(a, i, h - t)
            _stooge(a, i + t, h)
            _stooge(a, i, h - t)
    _stooge(arr, 0, len(arr) - 1)
    return arr
def lowerCAmelCase__(arr, i, h):
    """Recursively stooge-sort ``arr[i..h]`` (inclusive) in place.

    Fixes over the original: the three parameters all shared one name (a
    SyntaxError) and the recursive calls targeted an undefined name
    ``stooge``; the C-style ``(int)(.../3)`` cast is replaced by floor
    division, which is identical for the non-negative lengths involved.
    """
    if i >= h:
        return
    # If the first element is larger than the last, swap them.
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the slice
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the slice
        lowerCAmelCase__(arr, i, h - t)
        # Recursively sort the last 2/3 of the slice
        lowerCAmelCase__(arr, i + t, h)
        # Recursively sort the first 2/3 again
        lowerCAmelCase__(arr, i, h - t)
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin, sort, and print it.
    # NOTE(review): obfuscation damage — `user_input`, `unsorted` and
    # `stooge_sort` are read here but assigned/defined only under obfuscated
    # names in this chunk; confirm against the original script.
    _a = input("Enter numbers separated by a comma:\n").strip()
    _a = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 29
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
    """Arguments controlling GLUE data loading for fine-tuning.

    NOTE(review): every field below is named ``lowerCAmelCase_``, so in a real
    dataclass only the last definition would survive; the readable field names
    (presumably ``task_name``, ``data_dir``, ``max_seq_length``,
    ``overwrite_cache``) were lost in obfuscation — confirm against the
    upstream ``GlueDataTrainingArguments``.
    """
    # presumably `task_name` — which GLUE task to train on.
    lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    # presumably `data_dir` — directory containing the task's .tsv files.
    lowerCAmelCase_ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    # presumably `max_seq_length` — pad/truncate target after tokenization.
    lowerCAmelCase_ = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # presumably `overwrite_cache` — whether to ignore cached feature files.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    def __lowerCamelCase ( self ):
        """Presumably ``__post_init__``: normalize the task name to lower
        case. NOTE(review): the lowered value is bound to a throwaway local,
        so as written this has no effect — the intended target is presumably
        ``self.task_name``; confirm against upstream."""
        lowerCamelCase__ = self.task_name.lower()
class __A ( lowerCAmelCase ):
    """Dataset split identifiers (train / dev / test).

    NOTE(review): obfuscation damage — the base-class reference
    ``lowerCAmelCase`` is undefined here (upstream this is an ``Enum``), and
    the three identically named attributes shadow each other so only the last
    assignment (``"test"``) would survive as written; confirm against the
    upstream ``Split`` enum.
    """
    lowerCAmelCase_ = """train"""
    lowerCAmelCase_ = """dev"""
    lowerCAmelCase_ = """test"""
class __A ( lowerCAmelCase ):
    """Deprecated GLUE dataset backed by cached ``InputFeatures`` (upstream
    ``GlueDataset``): builds features from raw examples on first use and
    caches them to disk behind a file lock.

    NOTE(review): obfuscation damage throughout — the three ``42`` class
    attributes share one name (upstream these are typed attribute
    declarations), ``__init__`` repeats the parameter name ``__lowerCAmelCase``
    (a SyntaxError), and locals are assigned to the throwaway
    ``lowerCamelCase__`` while later lines read the intended names (``args``,
    ``label_list``, ``examples``, ``start``, ...). Restore identifiers from
    the upstream class before relying on any behavior described here.
    """
    # presumably typed attribute declarations (args / output_mode / features).
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
        """Load cached GLUE features for a split, or create and cache them."""
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
        lowerCamelCase__ = args
        lowerCamelCase__ = glue_processors[args.task_name]()
        lowerCamelCase__ = glue_output_modes[args.task_name]
        # Accept the split as a string and coerce it to the Split enum.
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            try:
                lowerCamelCase__ = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        # Load data features from cache or dataset file
        lowerCamelCase__ = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        lowerCamelCase__ = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
        lowerCamelCase__ = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCamelCase__ = cached_features_file + '''.lock'''
        with FileLock(__lowerCAmelCase ):
            if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
                lowerCamelCase__ = time.time()
                lowerCamelCase__ = torch.load(__lowerCAmelCase )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
                else:
                    lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    lowerCamelCase__ = examples[:limit_length]
                lowerCamelCase__ = glue_convert_examples_to_features(
                    __lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
                lowerCamelCase__ = time.time()
                torch.save(self.features , __lowerCAmelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
    def __len__( self ):
        """Number of cached feature rows."""
        return len(self.features )
    def __getitem__( self , __lowerCAmelCase ):
        """Return the cached ``InputFeatures`` at index ``i``."""
        return self.features[i]
    def __lowerCamelCase ( self ):
        """Presumably ``get_labels``: the task's label list."""
        return self.label_list
| 29
| 1
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    '''Image processor that rounds image height/width down to the nearest
    multiple of ``size_divisor`` and optionally rescales pixel values by 1/255.

    NOTE(review): in the incoming code every method was named
    ``__lowerCamelCase`` (later defs shadowed earlier ones) and declared
    duplicate parameter names (a SyntaxError), while ``__init__`` rebound a
    throwaway local instead of setting attributes. Method/parameter names are
    restored to the ones the class body itself calls (``self.resize`` /
    ``self.rescale`` and ``self.do_resize`` etc.).
    '''

    model_input_names = ["""pixel_values"""]
    lowerCAmelCase_ = model_input_names  # backward-compatible alias for the old mangled attribute

    def __init__( self , do_resize = True , size_divisor = 3_2 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ):
        '''
        Args:
            do_resize: whether to round images down to a multiple of ``size_divisor``.
            size_divisor: the multiple output height/width are rounded down to.
            resample: PIL resampling filter used when resizing.
            do_rescale: whether to multiply pixel values by ``1 / 255``.
        '''
        # ``preprocess`` reads these back off the instance, so they must be stored.
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )

    def resize( self , image , size_divisor , resample , data_format = None , **kwargs ):
        '''Resize ``image`` so height and width are the largest multiples of
        ``size_divisor`` not exceeding the originals.'''
        height, width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # ``resize`` below resolves to the module-level transform, not this method.
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image

    def rescale( self , image , scale , data_format = None , **kwargs ):
        '''Multiply the pixel values of ``image`` by ``scale``.'''
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size_divisor = None , resample=None , do_rescale = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''Prepare one image or a batch of images and return a ``BatchFeature``
        holding ``pixel_values``.

        Raises:
            ValueError: if resizing is requested without a ``size_divisor``, or
                if the input is not a valid image / batch of images.
        '''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 2_5_5 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 29
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_a = datasets.logging.get_logger(__name__)
_a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines ,sys_lines ,NP_only=False ,remove_nested=False ,keep_singletons=True ,min_span=False ,doc="dummy_doc" ):
    '''Build per-document coreference info (key clusters, system clusters and
    mention-to-cluster assignments in both directions) for the CoVal evaluator.

    NOTE(review): restored from a mangled original in which every local shared a
    single throwaway name; names follow the upstream CoVal metric, and the
    function is renamed ``get_coref_infos`` because ``evaluate`` below already
    calls it by that name.
    '''
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc ,key_doc_lines[doc] ,keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters ,key_doc_lines[doc] ,NP_only ,min_span )
    sys_clusters, singletons_num = reader.get_doc_mentions(doc ,sys_doc_lines[doc] ,keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters ,key_doc_lines[doc] ,NP_only ,min_span )
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters ,keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters ,keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters ,key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters ,sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines ,sys_lines ,metrics ,NP_only ,remove_nested ,keep_singletons ,min_span ):
    '''Run every (name, metric) pair in ``metrics`` over the documents built from
    ``key_lines``/``sys_lines`` and return recall/precision/F1 per metric, plus
    the averaged CoNLL score when MUC, B-cubed and CEAFe are all present.

    NOTE(review): restored from a mangled original; renamed ``evaluate`` because
    the Metric class below already calls it by that name.
    '''
    doc_coref_infos = get_coref_infos(key_lines ,sys_lines ,NP_only ,remove_nested ,keep_singletons ,min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos ,metric ,beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            # CoNLL score averages exactly these three F1 values.
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
        logger.info(
            name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,)
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F'CoNLL score: {conll:.2f}' )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def check_gold_parse_annotation(key_lines ) -> bool:
    '''Return True if the CoNLL-format ``key_lines`` carry a gold parse column
    (column 6 present and not ``"-"``) on the first non-comment data line.

    Bug fix: the original declared its parameter as ``__snake_case`` while the
    body iterated ``key_lines`` and read ``parse_col``/``has_gold_parse`` that
    were never bound — every call raised NameError.
    '''
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                # A data line without enough columns means no parse annotation.
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    '''CoVal coreference-resolution metric (MUC, B-cubed, CEAFe, LEA and the
    averaged CoNLL score).

    NOTE(review): both methods below were named ``__lowerCamelCase`` in the
    incoming code (the second shadowed the first) and ``_compute`` declared
    duplicate parameter names — a SyntaxError. Names are restored to the
    ``_info``/``_compute`` hooks that ``datasets.Metric`` invokes.
    '''

    def _info( self ):
        '''Describe the metric's input features and reference material.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' ) ),
                } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )

    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        '''Score ``predictions`` against ``references`` (lists of CoNLL-format
        line lists) and return recall/precision/F1 per metric plus ``conll_score``.

        Raises:
            NotImplementedError: if ``min_span`` is requested but the references
                carry no gold parse annotation.
        '''
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 29
| 1
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
# NOTE(review): the incoming code bound the logger and all three resource maps
# to the same name ``_a``, so the names the tokenizer class references
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) were never defined. Restored here;
# ``_a`` keeps pointing at the last binding for backward compatibility.
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum positional-embedding length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2_048,
}
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class __A ( lowerCAmelCase ):
    '''Fast (Rust-backed) CodeGen tokenizer.

    NOTE(review): in the incoming code every method was named
    ``__lowerCamelCase`` and every class attribute ``lowerCAmelCase_`` (each
    binding shadowing the previous), while ``decode`` called the then-missing
    ``self.truncate``. Names are restored to the hooks the
    ``PreTrainedTokenizerFast`` base class and this class body itself use.
    '''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = CodeGenTokenizer
    lowerCAmelCase_ = CodeGenTokenizer  # backward-compatible alias for the old mangled attribute

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        '''``add_prefix_space`` controls whether a leading space is inserted so the
        first word is tokenized like any other word.'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        if kwargs.pop('''add_bos_token''' , False ):
            model_id = kwargs.pop('''name_or_path''' , '''''' )
            raise ValueError(
                '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
                F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        # Rebuild the pre-tokenizer if its stored add_prefix_space disagrees.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus( self , *args , **kwargs ):
        '''Guard pretokenized input against add_prefix_space=False, then delegate.'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        '''Guard pretokenized input against add_prefix_space=False, then delegate.'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''Write the tokenizer model files to ``save_directory`` and return their paths.'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , truncate_before_pattern = None , **kwargs , ):
        '''Decode ids to text, optionally truncating at the earliest match of any
        regex in ``truncate_before_pattern``.'''
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern )
        return decoded_text

    def truncate( self , completion , truncate_before_pattern ):
        '''Cut ``completion`` before a second top-level ``print``/``def`` and before
        the earliest match of any pattern in ``truncate_before_pattern``.'''
        def find_re(string , pattern , start_pos ):
            # Position of the first match at/after start_pos, or -1 when absent.
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer('''^print''' , completion , re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('''^def''' , completion , re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion , terminal , start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
| 29
|
# This is the module that test_patching.py uses to test patch_submodule()
# It deliberately imports `os`/`os.path`/`join` under several names (plain,
# aliased, attribute, from-import) so the patching tests can cover every
# import style; none of these imports are otherwise used.
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_a = open # noqa: we just need to have a builtin inside this module to test it properly
| 29
| 1
|
def lowerCAmelCase__(nums ) -> float:
    '''Return the mean absolute deviation of ``nums``.

    Bug fix: the original declared its parameter as ``__snake_case`` while the
    body read ``nums`` and ``average`` that were never bound — every call
    raised NameError.

    Raises:
        ValueError: if ``nums`` is empty.
    '''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''' )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_a = logging.get_logger(__name__)
class __A :
    '''Holds the name of the currently selected parallel backend; ``None`` means
    "use the multiprocessing pool".'''

    # The functions below read/write this as ``ParallelBackendConfig.backend_name``.
    backend_name = None
    lowerCAmelCase_ = None  # backward-compatible alias for the old mangled attribute


# NOTE(review): the functions in this module reference ``ParallelBackendConfig``,
# which the incoming code never defined (the class was only named ``__A``);
# the alias below repairs that without changing the class's own name.
ParallelBackendConfig = __A
@experimental
def lowerCAmelCase__(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ):
    '''Dispatch a parallel map to the multiprocessing pool (default) or to the
    joblib backend selected via the parallel-backend context manager.

    Bug fix: the original declared seven parameters all named ``__snake_case``
    — duplicate parameter names are a SyntaxError; names restored to match the
    upstream `datasets` implementation.
    '''
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func )
    return _map_with_joblib(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func )
def _map_with_multiprocessing_pool(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ):
    '''Split ``iterable`` into contiguous slices, map ``single_map_nested_func``
    over them in a multiprocessing ``Pool`` and return the flattened results.

    NOTE(review): renamed from the mangled ``lowerCAmelCase__`` because the
    dispatcher above calls ``_map_with_multiprocessing_pool``; locals restored
    (the incoming code read ``num_proc``/``split_kwds`` that were never bound).
    '''
    # Never spawn more workers than there are items.
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index ,mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F'Error dividing inputs iterable among processes. '
            F'Total number of objects {len(iterable )}, '
            F'length: {sum(len(i[1] ) for i in split_kwds )}' )
    logger.info(
        F'Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock so worker progress bars don't interleave output.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc ,initargs=initargs ,initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func ,split_kwds )
    logger.info(F'Finished {num_proc} processes' )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F'Unpacked {len(mapped )} objects' )
    return mapped
def _map_with_joblib(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ):
    '''Map ``single_map_nested_func`` over ``iterable`` via the joblib backend
    registered in ``ParallelBackendConfig``.

    NOTE(review): renamed from the mangled ``lowerCAmelCase__`` (the dispatcher
    calls ``_map_with_joblib``) and the duplicate ``__snake_case`` parameters —
    a SyntaxError — replaced with real names. ``disable_tqdm``/``desc`` are
    accepted for signature parity but joblib provides no progress reporting.
    '''
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowerCAmelCase__(backend_name ):
    '''Context manager that routes this module's parallel map through
    ``backend_name`` (e.g. ``"spark"``) for the duration of the ``with`` block.

    Bug fix: the incoming code bound ``backend_name`` to a throwaway local, so
    ``ParallelBackendConfig.backend_name`` — which the dispatcher reads — was
    never set, and never reset on exit.
    '''
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # Always restore the default (multiprocessing) backend.
        ParallelBackendConfig.backend_name = None
| 29
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name ):
    '''Translate one GroupViT checkpoint parameter name into the corresponding
    HuggingFace model parameter name.

    Bug fix: the original declared its parameter as ``__snake_case`` while the
    body read ``name`` (NameError) and bound each replacement to a throwaway
    local; it is also renamed ``rename_key`` because ``convert_state_dict``
    already calls it by that name.
    '''
    if "img_encoder.pos_embed" in name:
        name = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''' )
    if "img_encoder.layers" in name:
        name = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''' )
    if "blocks" in name and "res" not in name:
        name = name.replace('''blocks''' ,'''layers''' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('''attn''' ,'''self_attn''' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('''proj''' ,'''out_proj''' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''' )
    if "norm1" in name:
        name = name.replace('''norm1''' ,'''layer_norm1''' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('''norm2''' ,'''layer_norm2''' )
    if "img_encoder.norm" in name:
        name = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''' )
    if "ln_1" in name:
        name = name.replace('''ln_1''' ,'''layer_norm1''' )
    if "ln_2" in name:
        name = name.replace('''ln_2''' ,'''layer_norm2''' )
    if "c_fc" in name:
        name = name.replace('''c_fc''' ,'''fc1''' )
    if "c_proj" in name:
        name = name.replace('''c_proj''' ,'''fc2''' )
    if "text_encoder" in name:
        name = name.replace('''text_encoder''' ,'''text_model''' )
    if "ln_final" in name:
        name = name.replace('''ln_final''' ,'''final_layer_norm''' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''' )
    if "img_projector.linear_out." in name:
        name = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('''text_projector.linear_hidden''' ,'''text_projection''' )
    if "text_projector.linear_out" in name:
        name = name.replace('''text_projector.linear_out''' ,'''text_projection.3''' )
    return name
def convert_state_dict(orig_state_dict ,config ):
    '''Rewrite a GroupViT checkpoint state dict in place so its keys/values match
    the HuggingFace ``GroupViTModel`` layout, and return it.

    NOTE(review): restored from a mangled original whose duplicate parameter
    names were a SyntaxError and whose dict-key assignment targets were erased;
    the q/k/v target key strings are reconstructed from the upstream conversion
    script — confirm against a real checkpoint before relying on them.
    '''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''' )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    F'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    F'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'
                ] = val[:dim]
                orig_state_dict[
                    F'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[F'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[F'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[F'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[F'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    '''Download the standard COCO cats test image used to sanity-check the
    converted model.

    Bug fix: the original passed ``stream=__snake_case`` — an undefined name —
    where streaming must be enabled to read ``.raw``; it is also renamed
    ``prepare_img`` because the conversion function already calls it by that name.
    '''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path ,pytorch_dump_folder_path ,model_name="groupvit-gcc-yfcc" ,push_to_hub=False ):
    '''Convert a GroupViT research checkpoint to the HuggingFace format, verify
    its logits on a reference image, save model + processor, and optionally push
    them to the hub.

    NOTE(review): restored from a mangled original whose locals (``config``,
    ``model``, ``model_name`` in the error message, ...) were never bound;
    renamed ``convert_groupvit_checkpoint`` because the CLI entry point below
    already calls it by that name.
    '''
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path ,map_location='''cpu''' )['''model''']
    new_state_dict = convert_state_dict(state_dict ,config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict ,strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
    image = prepare_img()
    inputs = processor(text=['''a photo of a cat''', '''a photo of a dog'''] ,images=image ,padding=True ,return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
    else:
        raise ValueError(F'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image ,expected_logits ,atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('''Successfully saved processor and model to''' ,pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(model_name ,organization='''nielsr''' )
        model.push_to_hub(model_name ,organization='''nielsr''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
_a = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 29
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __A ( lowerCAmelCase ):
    """Output container for the temporal transformer model below.

    NOTE(review): the single field was reduced to the placeholder ``42`` by an
    automated rename; upstream this is ``sample: torch.FloatTensor`` — confirm
    against the original diffusers source.
    """
    # Placeholder left by the rename pass; originally an annotated field.
    lowerCAmelCase_ = 42
class __A ( lowerCAmelCase , lowerCAmelCase ):
    """Transformer that attends over the temporal (frame) axis of a video
    latent (upstream: diffusers' ``TransformerTemporalModel``).

    Fixes vs. the previous version: every parameter of ``__init__`` and of the
    forward method shared a single name (a SyntaxError), and the configuration
    and sub-module assignments were bound to throwaway locals instead of
    ``self``. Parameter names and defaults are restored from the upstream
    signature (the defaults match the originals one-for-one).
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads=16,
        attention_head_dim=88,
        in_channels=None,
        out_channels=None,
        num_layers=1,
        dropout=0.0,
        norm_num_groups=32,
        cross_attention_dim=None,
        attention_bias=False,
        sample_size=None,
        activation_fn="geglu",
        norm_elementwise_affine=True,
        double_self_attention=True,
    ):
        """Build the norm, in/out projections and the stack of transformer blocks."""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        # Channel-wise group norm applied before projecting into the inner dim.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1E-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ] )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def __lowerCamelCase ( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ):
        """Apply temporal self/cross-attention over the frame axis.

        ``hidden_states`` is expected as ``(batch * num_frames, c, h, w)``; the
        result has the same shape, added to the residual input.

        NOTE(review): the method keeps the (name-mangled) identifier
        ``__lowerCamelCase`` from this file — upstream it is ``forward`` — and
        ``TransformerTemporalModelOutput`` is not defined under that name here
        (the dataclass above was renamed to ``__A``); confirm both.
        """
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        # (b*f, c, h, w) -> (b, c, f, h, w) so the group norm sees channels.
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        # Fold spatial positions into the batch; sequence axis = frames.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
| 29
| 1
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases for the Euclidean-distance helpers below.
# NOTE(review): both aliases were renamed to `_a`, so the second shadows the
# first; upstream these are `Vector` and `VectorOut` (and `np.floataa` was
# `np.float64`) — the `-> VectorOut` annotations below depend on those names.
_a = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
_a = typing.Union[np.floataa, int, float]  # noqa: UP007
def lowerCAmelCase__(point_a ,point_b ) -> float:
    """Return the Euclidean distance between two equal-length vectors (NumPy).

    Fixes vs. the previous version: both parameters shared the name
    ``__snake_case`` (a SyntaxError), and the return annotation claimed a
    vector for what is a scalar.
    """
    return np.sqrt(np.sum((np.asarray(point_a ) - np.asarray(point_b )) ** 2 ) )
def lowerCAmelCase__(point_a ,point_b ) -> float:
    """Pure-Python Euclidean distance between two equal-length vectors.

    Fixes vs. the previous version: both parameters shared one name (a
    SyntaxError) and the zip targets were both ``va``, so the summand
    ``(va - va) ** 2`` was always zero and the function returned 0 for every
    input.
    """
    return sum((va - vb) ** 2 for va, vb in zip(point_a ,point_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
        """Time both distance implementations with timeit.

        Fix: the function was previously bound to the throwaway name
        ``lowerCAmelCase__`` while the call below used ``benchmark`` — a
        NameError. NOTE(review): the statement strings still reference
        ``euclidean_distance_no_np`` / ``euclidean_distance``; those must be
        the module-level names of the two functions above for the
        ``globals()`` lookup inside timeit to succeed — confirm after the
        obfuscated names are restored.
        """
        from timeit import timeit
        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' ,number=10000 ,globals=globals() ,) )
        print('''With Numpy''' )
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''' ,number=10000 ,globals=globals() ,) )
    benchmark()
| 29
|
# Notebook bootstrap snippet (Korean): installs transformers + datasets, with
# a commented-out from-source install alternative.
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# NOTE(review): the constants were all renamed to `_a`, so each binding
# shadows the previous one, and `INSTALL_CONTENT` below is no longer defined
# under that name — upstream these are INSTALL_CONTENT / notebook_first_cells
# / black_avoid_patterns.
_a = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder substitutions used when rendering documentation code samples.
_a = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 29
| 1
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
# Fixtures for the mocked-download tests below: the fake URL, the payload the
# mock "server" returns, and the sha256 the download manager should use as the
# cached file name.
# NOTE(review): all three were renamed to `_a`, so each binding shadows the
# previous one; upstream they are URL, CONTENT and HASH, which the test
# bodies below still reference.
_a = "http://www.mocksite.com/file1.txt"
_a = "\"text\": [\"foo\", \"foo\"]"
_a = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __A :
    """Minimal stand-in for a `requests` response, returned by the
    monkeypatched HTTP layer in the download-manager tests below."""
    # 200 OK, fixed Content-Length header, no cookies.
    lowerCAmelCase_ = 200
    lowerCAmelCase_ = {"""Content-Length""": """100"""}
    lowerCAmelCase_ = {}
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        """Mimic ``iter_content``: return the payload as one bytes chunk.

        NOTE(review): ``bytes(__lowerCAmelCase, 'utf-8')`` applies ``bytes()``
        to the kwargs dict (and the identifier is also subject to class-private
        name mangling), so this raises TypeError at runtime; upstream it is
        ``bytes(CONTENT, "utf-8")`` — restore the CONTENT constant reference.
        """
        return [bytes(__lowerCAmelCase , '''utf-8''' )]
def lowerCAmelCase__(*args ,**kwargs ) -> Dict:
    """Monkeypatch target for ``requests.request``: ignore all arguments and
    return a fresh mock response.

    Fix: ``*`` and ``**`` previously shared the name ``__snake_case`` — a
    SyntaxError. NOTE(review): ``MockResponse`` is not defined under that name
    in this file (the class above was renamed to ``__A``) — confirm the
    intended binding.
    """
    return MockResponse()
@pytest.mark.parametrize('''urls_type''' ,[str, list, dict] )
def lowerCAmelCase__(monkeypatch ,urls_type ,tmp_path ) -> None:
    """End-to-end DownloadManager.download test over a mocked HTTP layer.

    Parametrized over the request shape (single url / list / dict); checks the
    returned cache paths, the cached content and the sidecar .json metadata.

    Fixes vs. the previous version: the three parameters shared one name (a
    SyntaxError) and most locals were collapsed to a single throwaway name,
    so later lines read variables that were never bound.

    NOTE(review): ``URL``/``CONTENT``/``HASH`` are the upstream names of the
    module constants (renamed to ``_a`` here) and ``mock_request`` the
    upstream name of the monkeypatch target above — restore those names.
    """
    import requests
    monkeypatch.setattr(requests ,'''request''' ,mock_request )
    url = URL
    if issubclass(urls_type ,str ):
        urls = url
    elif issubclass(urls_type ,list ):
        urls = [url]
    elif issubclass(urls_type ,dict ):
        urls = {'''train''': url}
    dataset_name = '''dummy'''
    cache_subdir = '''downloads'''
    cache_dir_root = tmp_path
    # use_etag=False so the cache file name is the hash of the URL alone and
    # the recorded etag is None (asserted below).
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root ,cache_subdir ) ,use_etag=False ,)
    dl_manager = DownloadManager(dataset_name=dataset_name ,download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    # Normalize every shape to parallel lists of (path, url).
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls ,str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls ,dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths ,input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('''.json''' )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' ,[str, list, dict] )
def lowerCAmelCase__(paths_type ,xz_file ,text_file ) -> None:
    """DownloadManager.extract test: extract an .xz fixture and compare the
    extracted content with the original text file.

    Fixes vs. the previous version: the three parameters shared one name (a
    SyntaxError) and most locals were collapsed to one throwaway name, so the
    body read variables that were never bound.
    """
    filename = str(xz_file )
    if issubclass(paths_type ,str ):
        paths = filename
    elif issubclass(paths_type ,list ):
        paths = [filename]
    elif issubclass(paths_type ,dict ):
        paths = {'''train''': filename}
    dataset_name = '''dummy'''
    cache_dir = xz_file.parent
    extracted_subdir = '''extracted'''
    download_config = DownloadConfig(
        cache_dir=cache_dir ,use_etag=False ,)
    dl_manager = DownloadManager(dataset_name=dataset_name ,download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    # Normalize every shape to parallel lists of (extracted_path, input_path).
    for extracted_paths in [extracted_paths]:
        if isinstance(paths ,str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths ,dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths ,input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path ,etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def lowerCAmelCase__(path ,file ) -> None:
    """Assert that *file* yields exactly four JSONL records with the expected columns.

    Args:
        path: archive-member name; must end in ``.jsonl``.
        file: iterable of raw ``bytes`` lines.

    Fixes vs. the previous version: both parameters shared the name
    ``__snake_case`` (a SyntaxError), the parsed record was bound to a
    throwaway name while the assert read ``item`` (NameError), and the return
    annotation claimed ``Tuple`` for a function that returns nothing.
    """
    assert path.endswith('''.jsonl''' )
    num_items = 0  # guard: the final assert must not NameError on an empty file
    for num_items, line in enumerate(file ,start=1 ):
        item = json.loads(line.decode('''utf-8''' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' ,['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def lowerCAmelCase__(archive_jsonl ,request ) -> None:
    """Iterate a (tar/zip) archive fixture and validate each JSONL member.

    Fixes vs. the previous version: both parameters shared one name (a
    SyntaxError) and the manager was bound to a throwaway name while the loop
    read ``dl_manager`` (NameError).

    NOTE(review): ``_test_jsonl`` is the upstream name of the helper defined
    above (renamed to ``lowerCAmelCase__`` here) — restore it so this call
    resolves.
    """
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) ,start=1 ):
        _test_jsonl(path ,file )
    assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' ,['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def lowerCAmelCase__(archive_nested_jsonl ,request ) -> None:
    """Iterate an archive nested inside another archive and validate the
    inner JSONL members.

    Fixes vs. the previous version: both parameters shared one name (a
    SyntaxError) and the manager was bound to a throwaway name while the loops
    read ``dl_manager`` (NameError).

    NOTE(review): ``_test_jsonl`` is the upstream name of the helper defined
    above (renamed here) — restore it so this call resolves.
    """
    nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(nested_jsonl_path ) ,start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) ,start=1 ):
            _test_jsonl(subpath ,subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def lowerCAmelCase__(data_dir ) -> None:
    """Check DownloadManager.iter_files yields exactly test.txt then train.txt.

    Fixes vs. the previous version: the manager was bound to a throwaway name
    while the loop read ``dl_manager`` (NameError), and ``os.path.basename``
    was applied to the directory argument instead of the yielded file path.

    NOTE(review): the parameter is a pytest fixture — restore its upstream
    fixture name so pytest can resolve it.
    """
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir ) ,start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 29
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it provides.
# Fix: `_import_structure` was never defined (the original assignments were
# renamed to `_a` and the conditional additions lost their dict subscripts),
# so the `_LazyModule(...)` call below raised NameError on import, and the
# dangling `import sys` went unused.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: expose the PyTorch model classes.
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax present: expose the Flax model classes.
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys
    # Replace this module with a lazy proxy; `_a` kept for compatibility with
    # the (renamed) original binding.
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _a
| 29
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __A ( unittest.TestCase ):
    """Unit tests for VisionTextDualEncoderProcessor (BERT tokenizer + ViT
    image processor wrapper).

    NOTE(review): an automated rename collapsed most local assignments to the
    shared name ``lowerCamelCase__`` and turned module-constant references
    into (class-private-mangled) ``__lowerCAmelCase`` names, so several lines
    below read names that are never bound (e.g. ``self.vocab_file``,
    ``vocab_tokens``, the image-processor file name in ``setUp``) — compare
    each method against the upstream test before trusting individual lines.
    """
    def __lowerCamelCase ( self ):
        """setUp: create a temp dir with a toy BERT vocab file and a ViT
        image-processor JSON config."""
        lowerCamelCase__ = tempfile.mkdtemp()
        # fmt: off
        lowerCamelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # Minimal image-processor config: 18x18 resize, 0.5 mean/std normalize.
        lowerCamelCase__ = {
            '''do_resize''': True,
            '''size''': {'''height''': 1_8, '''width''': 1_8},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        lowerCamelCase__ = os.path.join(self.tmpdirname , __lowerCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowerCAmelCase , __lowerCAmelCase )
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        """Build a BertTokenizer from the fixture dir, forwarding kwargs."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        """Build a ViTImageProcessor from the fixture dir, forwarding kwargs."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """tearDown: remove the fixture directory."""
        shutil.rmtree(self.tmpdirname )
    def __lowerCamelCase ( self ):
        """Return one random 3x30x400 uint8 image as a PIL Image."""
        lowerCamelCase__ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        lowerCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def __lowerCamelCase ( self ):
        """save_pretrained then from_pretrained round-trips both components."""
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """from_pretrained honors extra kwargs (special tokens, normalize flags)."""
        lowerCamelCase__ = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        lowerCamelCase__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        lowerCamelCase__ = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """processor(images=...) matches the bare image processor's output."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        lowerCamelCase__ = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def __lowerCamelCase ( self ):
        """processor(text=...) matches the bare tokenizer's output."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = processor(text=__lowerCAmelCase )
        lowerCamelCase__ = tokenizer(__lowerCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def __lowerCamelCase ( self ):
        """text+image call yields the combined key set; empty call raises."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(__lowerCAmelCase ):
            processor()
    def __lowerCamelCase ( self ):
        """batch_decode is forwarded to the tokenizer."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCamelCase__ = processor.batch_decode(__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """The combined call produces exactly processor.model_input_names."""
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    """Deprecated alias kept for backward compatibility: warns, then defers to
    the parent image-processor class (upstream: ``OwlViTImageProcessor``)."""

    def __init__( self , *args , **kwargs ):
        """Emit the deprecation warning, then initialize the parent class.

        Fixes vs. the previous version: ``*`` and ``**`` shared one parameter
        name (a SyntaxError), and the warning category argument was an
        undefined (name-mangled) identifier instead of ``FutureWarning``.
        """
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 29
| 1
|
def lowerCAmelCase__(point_a ,point_b ) -> float:
    """Return the squared Euclidean distance between two 2-D points.

    Fixes vs. the previous version: both parameters shared the name
    ``__snake_case`` (a SyntaxError) and the body subtracted each coordinate
    from itself, so the result was always 0.
    """
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
def lowerCAmelCase__(points ,column=0 ) -> list:
    """Return *points* sorted by the coordinate at index *column*.

    Fixes vs. the previous version: both parameters shared one name (a
    SyntaxError) and the sort key lambda bound one name but read another
    (``x[column]``), so it could never run. The keyword name ``column`` is
    preserved — the caller below passes it by keyword.
    """
    return sorted(points ,key=lambda point : point[column] )
def lowerCAmelCase__(points ,points_counts ,min_dis=float('''inf''' ) ) -> float:
    """Brute-force the minimum squared distance among the first
    *points_counts* points (O(n^2) base case of the divide-and-conquer).

    Fixes vs. the previous version: the three parameters shared one name (a
    SyntaxError) and the running minimum was written to a throwaway local
    instead of ``min_dis``. NOTE(review): ``euclidean_distance_sqr`` is the
    upstream name of the helper above (renamed in this file) — restore it.
    """
    for i in range(points_counts - 1 ):
        for j in range(i + 1 ,points_counts ):
            current_dis = euclidean_distance_sqr(points[i] ,points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def lowerCAmelCase__(points ,points_counts ,min_dis=float('''inf''' ) ) -> float:
    """Minimum squared distance inside the dividing strip: each point is only
    compared with its (at most) six neighbors in y-sorted order.

    Fixes vs. the previous version: the three parameters shared one name (a
    SyntaxError), the inner range end was lost, and the running minimum was
    written to a throwaway local instead of ``min_dis``. NOTE(review):
    ``euclidean_distance_sqr`` is the upstream name of the helper above.
    """
    for i in range(min(6 ,points_counts - 1 ) ,points_counts ):
        for j in range(max(0 ,i - 6 ) ,i ):
            current_dis = euclidean_distance_sqr(points[i] ,points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def lowerCAmelCase__(points_sorted_on_x ,points_sorted_on_y ,points_counts ) -> float:
    """Divide-and-conquer minimum squared distance over *points_counts* points.

    Fixes the previous signature, in which all three parameters shared one
    name (a SyntaxError). NOTE(review): the recursion and helper calls keep
    the upstream names (``closest_pair_of_points_sqr``,
    ``dis_between_closest_pair``, ``dis_between_closest_in_strip``); the
    sibling defs in this file were renamed to ``lowerCAmelCase__`` — restore
    their names so these calls resolve.
    """
    # base case: brute force small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x ,points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x ,points_sorted_on_y[:mid] ,mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y ,points_sorted_on_y[mid:] ,points_counts - mid )
    closest_pair_dis = min(closest_in_left ,closest_in_right )
    # collect points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip ,len(cross_strip ) ,closest_pair_dis )
    return min(closest_pair_dis ,closest_in_strip )
def lowerCAmelCase__(points ,points_counts ) -> float:
    """Return the minimum Euclidean distance between any two of *points*.

    Fixes the previous signature, in which both parameters shared one name (a
    SyntaxError). NOTE(review): ``column_based_sort`` and
    ``closest_pair_of_points_sqr`` are the upstream names of the helpers above
    (renamed in this file) — restore them so these calls resolve.
    """
    points_sorted_on_x = column_based_sort(points ,column=0 )
    points_sorted_on_y = column_based_sort(points ,column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x ,points_sorted_on_y ,points_counts )
    ) ** 0.5
if __name__ == "__main__":
    # Demo: closest pair among six sample points.
    # NOTE(review): the list is bound to `_a` but the call below reads
    # `points`, and `closest_pair_of_points` is not defined under that name in
    # this file (the defs above were all renamed) — both raise NameError as
    # written; restore the upstream names.
    _a = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 29
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCAmelCase__(model_card_dir ,src_lang ,tgt_lang ,model_name ) -> None:
    """Generate and write a README.md model card for one allenai wmt16 model.

    Args:
        model_card_dir: Path the README.md is written into (created if needed).
        src_lang / tgt_lang: language pair for the card front-matter.
        model_name: one of the three allenai wmt16 checkpoint names.

    Fixes vs. the previous version: all four parameters shared the name
    ``__snake_case`` (a SyntaxError), the locals the template interpolates
    (``texts``, ``scores``, ``pair``, ``readme``, ``path``) were collapsed to
    throwaway names, and ``mkdir`` received undefined names instead of
    ``parents=True, exist_ok=True``. NOTE(review): one line break inside the
    template was garbled in the source — it is restored here as ``\\n``;
    confirm against the upstream script.
    """
    # Sample sentence per language, shown in the usage example.
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''wmt16-en-de-dist-12-1''': [28.3, 27.52],
        '''wmt16-en-de-dist-6-1''': [27.4, 27.11],
        '''wmt16-en-de-12-1''': [26.9, 25.75],
    }
    pair = F'{src_lang}-{tgt_lang}'
    readme = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. \n`transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n    year={{2020}},\n    eprint={{2006.10369}},\n    archivePrefix={{arXiv}},\n    primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True ,exist_ok=True )
    path = os.path.join(model_card_dir ,'''README.md''' )
    print(F'Generating {path}' )
    with open(path ,'''w''' ,encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
# Fixes vs. the previous version: repo_dir / model_cards_dir / model_card_dir
# were all bound to the throwaway name `_a` while the following lines read
# the original identifiers (NameError on import).
# NOTE(review): `write_model_card` is the upstream name of the card-writing
# function above (renamed to `lowerCAmelCase__` in this file) — restore it so
# the call resolves.
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it provides.
# Fix: `_import_structure` was never defined (the original assignments were
# renamed to `_a` and the conditional additions lost their dict subscripts),
# so the `_LazyModule(...)` call below raised NameError on import, and the
# dangling `import sys` went unused.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: expose the PyTorch model classes.
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax present: expose the Flax model classes.
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys
    # Replace this module with a lazy proxy; `_a` kept for compatibility with
    # the (renamed) original binding.
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _a
| 29
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    """Deprecated alias kept for backward compatibility: warns, then defers to
    the parent image-processor class (upstream: ``SegformerImageProcessor``)."""

    def __init__( self , *args , **kwargs ):
        """Emit the deprecation warning, then initialize the parent class.

        Fixes vs. the previous version: ``*`` and ``**`` shared one parameter
        name (a SyntaxError), and the warning category argument was an
        undefined (name-mangled) identifier instead of ``FutureWarning``.
        """
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 29
| 1
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( lowerCAmelCase , unittest.TestCase ):
    """Tokenization tests for PhobertTokenizer over a tiny fixture vocab/merges.

    NOTE(review): the rename pass collapsed local assignments to the shared
    name ``lowerCamelCase__`` and mangled module-constant references, so some
    lines below read names that are never bound (e.g. ``vocab_tokens`` in
    ``setUp``) — compare with the upstream test before trusting them.
    """
    # Tokenizer class under test; slow-only (no rust tokenizer).
    lowerCAmelCase_ = PhobertTokenizer
    lowerCAmelCase_ = False
    def __lowerCamelCase ( self ):
        """setUp: write a toy vocab file ("token index" lines) and a BPE merges file."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase__ = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
        lowerCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
        lowerCamelCase__ = ['''#version: 0.2''', '''l à</w>''']
        lowerCamelCase__ = {'''unk_token''': '''<unk>'''}
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            for token in vocab_tokens:
                fp.write(F'{token} {vocab_tokens[token]}\n' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__lowerCAmelCase ) )
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        """Build a PhobertTokenizer from the fixture dir, including the special-tokens map."""
        kwargs.update(self.special_tokens_map )
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Return an (input_text, expected_decoded_text) pair for round-trip checks."""
        lowerCamelCase__ = '''Tôi là VinAI Research'''
        lowerCamelCase__ = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
        return input_text, output_text
    def __lowerCamelCase ( self ):
        """Tokenize a Vietnamese sentence and check tokens and their vocab ids
        (unknown subwords map to id 3, the <unk> token)."""
        lowerCamelCase__ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCamelCase__ = '''Tôi là VinAI Research'''
        lowerCamelCase__ = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
        lowerCamelCase__ = tokenizer.tokenize(__lowerCAmelCase )
        print(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = tokens + [tokenizer.unk_token]
        lowerCamelCase__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
| 29
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> float | int:
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCamelCase__ = cst_fwd.get(__snake_case ,np.inf )
lowerCamelCase__ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
lowerCamelCase__ = new_cost_f
lowerCamelCase__ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCAmelCase__(source ,destination ,graph_forward ,graph_backward ) -> int:
    """Bidirectional Dijkstra: shortest distance from *source* to
    *destination*, or -1 when unreachable.

    Fixes vs. the previous version: the four parameters shared one name (a
    SyntaxError) and every piece of bookkeeping (cost maps, parent maps,
    queues, popped vertices) was bound to the same throwaway local, so the
    search state was never maintained. NOTE(review): ``pass_and_relaxation``
    is the upstream name of the helper above (renamed to ``lowerCAmelCase__``
    in this file) — restore it so the calls resolve.
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        # One relaxation step per direction, swapping the forward/backward
        # roles of the state for the backward call.
        shortest_distance = pass_and_relaxation(
            graph_forward ,v_fwd ,visited_forward ,visited_backward ,cst_fwd ,cst_bwd ,queue_forward ,parent_forward ,shortest_distance ,)
        shortest_distance = pass_and_relaxation(
            graph_backward ,v_bwd ,visited_backward ,visited_forward ,cst_bwd ,cst_fwd ,queue_backward ,parent_backward ,shortest_distance ,)
        # Termination: the frontiers' combined cost can no longer improve.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: node -> [[neighbor, edge_weight], ...].
# NOTE(review): both graphs were renamed to `_a`, so the second binding
# shadows the first; upstream these are the forward and backward graphs
# passed to the bidirectional search above.
_a = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# Backward (edge-reversed) adjacency list for the backward search direction.
_a = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 29
| 1
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_a = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class __A ( lowerCAmelCase ):
    '''Depth-estimation pipeline: takes an image and returns the model's
    predicted depth both as a tensor (``predicted_depth``) and as a rescaled
    8-bit greyscale PIL image (``depth``).

    NOTE(review): identifiers here look machine-mangled.  All four hook
    methods are named ``__lowerCamelCase`` (each ``def`` shadows the previous
    one — presumably ``_sanitize_parameters`` / ``preprocess`` / ``_forward``
    / ``postprocess`` upstream; confirm against the original source), and
    each statement rebinds the single local ``lowerCamelCase__`` while later
    lines read never-bound names (``model_inputs``, ``model_outputs``,
    ``predicted_depth``, ``output``, ``depth``, ``output_dict``).
    '''
    def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        '''Forward construction args to the base pipeline; requires the vision backend.'''
        # NOTE(review): *args and **kwargs share one mangled name — a
        # duplicate-argument SyntaxError as written.
        super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
        requires_backends(self , '''vision''' )
        self.check_model_type(__lowerCAmelCase )
    def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
        '''Run the pipeline on one image (path, URL or PIL image) or a batch.'''
        return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        '''Accept no extra call-time parameters: empty kwargs for each stage.'''
        return {}, {}, {}
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        '''Preprocess: load the image and convert it to framework tensors.'''
        lowerCamelCase__ = load_image(__lowerCAmelCase )
        lowerCamelCase__ = image.size  # NOTE(review): ``image`` never bound; upstream likely stores ``self.image_size`` (read by postprocess below)
        lowerCamelCase__ = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
        return model_inputs  # NOTE(review): ``model_inputs`` never bound — the processor output above went to the mangled local
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        '''Forward: feed the preprocessed tensors through the model.'''
        lowerCamelCase__ = self.model(**__lowerCAmelCase )
        return model_outputs  # NOTE(review): ``model_outputs`` never bound
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        '''Postprocess: upscale predicted depth to the input size and render it
        as an 8-bit greyscale image.'''
        lowerCamelCase__ = model_outputs.predicted_depth  # NOTE(review): ``model_outputs`` never bound
        lowerCamelCase__ = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=__lowerCAmelCase )
        lowerCamelCase__ = prediction.squeeze().cpu().numpy()
        lowerCamelCase__ = (output * 2_5_5 / np.max(__lowerCAmelCase )).astype('''uint8''' )
        lowerCamelCase__ = Image.fromarray(__lowerCAmelCase )
        lowerCamelCase__ = {}
        lowerCamelCase__ = predicted_depth
        lowerCamelCase__ = depth
        return output_dict
| 29
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
    '''Processor pairing a CLAP feature extractor with a Roberta tokenizer:
    text goes through the tokenizer, audio through the feature extractor, and
    a combined call is meant to merge audio features into the text encoding.

    NOTE(review): identifiers look machine-mangled — ``__call__``'s
    parameters share a single name (a duplicate-argument SyntaxError) and
    each statement rebinds ``lowerCamelCase__`` while later lines read the
    never-bound names ``text`` / ``audios`` / ``kwargs`` / ``encoding`` /
    ``audio_features``.  Compare with the upstream ClapProcessor before use.
    '''
    lowerCAmelCase_ = """ClapFeatureExtractor"""
    lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
        '''Store the feature extractor and tokenizer via the processor mixin.'''
        # NOTE(review): duplicate parameter name — SyntaxError as written.
        super().__init__(__lowerCAmelCase , __lowerCAmelCase )
    def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
        '''Encode text and/or audio; at least one of the two must be given.'''
        lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )  # NOTE(review): ``kwargs`` is never bound here
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if audios is not None:
            lowerCamelCase__ = self.feature_extractor(
                __lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if text is not None and audios is not None:
            # NOTE(review): upstream assigns ``encoding["input_features"]`` here;
            # the mangled assignment below silently drops the audio features.
            lowerCamelCase__ = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        '''Delegate batch decoding to the tokenizer.'''
        return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        '''Delegate decoding to the tokenizer.'''
        return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
    @property
    def __lowerCamelCase ( self ):
        '''Union of tokenizer and feature-extractor model input names, deduplicated.'''
        lowerCamelCase__ = self.tokenizer.model_input_names
        lowerCamelCase__ = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29
| 1
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    '''TF MobileBERT model test suite: declares the model classes and pipeline
    mapping under test, plus a hook that attaches dummy labels.

    NOTE(review): attribute names are machine-mangled — every attribute is
    ``lowerCAmelCase_``, so later assignments shadow earlier ones; upstream
    these are presumably ``all_model_classes``, ``pipeline_model_mapping``
    and two ``test_*`` feature flags.
    '''
    lowerCAmelCase_ = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    lowerCAmelCase_ = (
        {
            """feature-extraction""": TFMobileBertModel,
            """fill-mask""": TFMobileBertForMaskedLM,
            """question-answering""": TFMobileBertForQuestionAnswering,
            """text-classification""": TFMobileBertForSequenceClassification,
            """token-classification""": TFMobileBertForTokenClassification,
            """zero-shot""": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
        '''Prepare inputs for a model class; when labels are requested, add a
        zero label tensor for pretraining-mapped models.'''
        # NOTE(review): duplicate parameter names — SyntaxError as written;
        # the lines below read the never-bound names ``return_labels``,
        # ``model_class`` and ``inputs_dict``.
        lowerCamelCase__ = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
        if return_labels:
            if model_class in get_values(__lowerCAmelCase ):
                lowerCamelCase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
        return inputs_dict
class __A ( lowerCAmelCase ):
    '''Builds tiny MobileBERT configs/random inputs and checks each TF model
    head's output shapes; also hosts the suite's test_* methods.

    NOTE(review): identifiers look machine-mangled — several ``def``s repeat
    one parameter name (a duplicate-argument SyntaxError), every method is
    named ``__lowerCamelCase`` (each def shadows the previous one), and each
    assignment rebinds the throwaway local ``lowerCamelCase__`` while later
    lines read never-bound names (``parent``, ``model``, ``result``,
    ``config_and_inputs`` ...).  Compare with the upstream
    TFMobileBertModelTest before relying on behavior.
    '''
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
        '''Record the dummy-model hyper-parameters.'''
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = seq_length
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_input_mask
        lowerCamelCase__ = use_token_type_ids
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = num_labels
        lowerCamelCase__ = num_choices
        lowerCamelCase__ = scope
        lowerCamelCase__ = embedding_size
    def __lowerCamelCase ( self ):
        '''Create a MobileBertConfig plus random ids, masks and labels.'''
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ = None
        if self.use_input_mask:
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ = None
        if self.use_token_type_ids:
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ = MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check base model output shapes (sequence + pooled).'''
        lowerCamelCase__ = TFMobileBertModel(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = [input_ids, input_mask]
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the masked-LM logits shape.'''
        lowerCamelCase__ = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the next-sentence-prediction logits shape (batch, 2).'''
        lowerCamelCase__ = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the pretraining prediction and seq-relationship logits shapes.'''
        lowerCamelCase__ = TFMobileBertForPreTraining(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(
            result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the sequence-classification logits shape.'''
        lowerCamelCase__ = self.num_labels
        lowerCamelCase__ = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the multiple-choice logits shape after tiling inputs per choice.'''
        lowerCamelCase__ = self.num_choices
        lowerCamelCase__ = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
        lowerCamelCase__ = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the token-classification logits shape.'''
        lowerCamelCase__ = self.num_labels
        lowerCamelCase__ = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the question-answering start/end logits shapes.'''
        lowerCamelCase__ = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __lowerCamelCase ( self ):
        '''Unpack the prepared tuple into (config, inputs_dict) for common tests.'''
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = config_and_inputs
        lowerCamelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
    def __lowerCamelCase ( self ):
        '''Set up the model tester and the config tester.'''
        lowerCamelCase__ = TFMobileBertModelTest.TFMobileBertModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
    def __lowerCamelCase ( self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def __lowerCamelCase ( self ):
        '''Exercise the base model check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the masked-LM check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the multiple-choice check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the next-sentence-prediction check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the pretraining check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the question-answering check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the sequence-classification check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the token-classification check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
    @slow
    def __lowerCamelCase ( self ):
        '''Smoke-test loading the pretrained checkpoint.'''
        for model_name in ["google/mobilebert-uncased"]:
            lowerCamelCase__ = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class __A ( unittest.TestCase ):
    '''Integration test: run the pretrained google/mobilebert-uncased
    pretraining head on a tiny input and compare a 3x3 logits slice against
    recorded reference values.'''
    @slow
    def __lowerCamelCase ( self ):
        '''Check the output shape and a logits slice to 1e-4 tolerance.'''
        # NOTE(review): locals are machine-mangled — every assignment rebinds
        # ``lowerCamelCase__`` while the lines below read the never-bound
        # names ``model`` / ``output`` and the unbound ``__lowerCAmelCase``.
        lowerCamelCase__ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
        lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        lowerCamelCase__ = [1, 6, 3_0_5_2_2]
        self.assertEqual(output.shape , __lowerCAmelCase )
        lowerCamelCase__ = tf.constant(
            [
                [
                    [-4.591_9547, -9.24_8295, -9.64_5256],
                    [-6.730_6175, -6.44_0284, -6.605_2837],
                    [-7.274_3506, -6.784_7915, -6.02_4673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
| 29
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
    '''Builds a tiny BertConfig-backed DPRConfig plus random inputs and checks
    each TF DPR model's output shapes.

    NOTE(review): identifiers look machine-mangled — ``__init__``'s
    parameters all share one name (a duplicate-argument SyntaxError), every
    method is named ``__lowerCamelCase`` (each def shadows the previous one),
    and each assignment rebinds the throwaway local ``lowerCamelCase__``
    while later lines read attributes/locals that are never bound.
    '''
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ):
        '''Record the dummy-model hyper-parameters (including projection_dim).'''
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = seq_length
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_input_mask
        lowerCamelCase__ = use_token_type_ids
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = num_labels
        lowerCamelCase__ = num_choices
        lowerCamelCase__ = scope
        lowerCamelCase__ = projection_dim
    def __lowerCamelCase ( self ):
        '''Create a DPRConfig (wrapping a BertConfig) plus random ids/masks/labels.'''
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ = None
        if self.use_token_type_ids:
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
        lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the context encoder's pooled output shape.'''
        lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the question encoder's pooled output shape.'''
        lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the reader's start/end/relevance logits shapes.'''
        lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def __lowerCamelCase ( self ):
        '''Unpack the prepared tuple into (config, inputs_dict) for common tests.'''
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = config_and_inputs
        lowerCamelCase__ = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    '''TF DPR model test suite: wires the tester to the three DPR model
    classes and runs the common config tests.

    NOTE(review): the repeated ``lowerCAmelCase_`` attributes shadow each
    other — upstream these are presumably ``all_model_classes``,
    ``pipeline_model_mapping`` and several ``test_*`` feature flags.
    '''
    lowerCAmelCase_ = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    def __lowerCamelCase ( self ):
        '''Create the model tester and a config tester.'''
        # NOTE(review): results go to a throwaway mangled local while the
        # tests below read ``self.model_tester`` / ``self.config_tester``;
        # ``__lowerCAmelCase`` is unbound here.
        lowerCamelCase__ = TFDPRModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
    def __lowerCamelCase ( self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def __lowerCamelCase ( self ):
        '''Exercise the context-encoder check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the question-encoder check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the reader check.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase )
    @slow
    def __lowerCamelCase ( self ):
        '''Smoke-test loading one pretrained checkpoint of each model class.'''
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class __A ( unittest.TestCase ):
    '''Integration test for the pretrained DPR question encoder: embed one
    tokenized question and compare the first ten embedding dimensions
    against recorded reference values.'''
    @slow
    def __lowerCamelCase ( self ):
        '''Check a 10-dim slice of the question embedding to 1e-4 tolerance.'''
        # NOTE(review): locals are machine-mangled — the lines below read the
        # never-bound names ``model`` / ``output`` / ``expected_slice``.
        lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        lowerCamelCase__ = tf.constant(
            [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
        lowerCamelCase__ = model(__lowerCAmelCase )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    0.0323_6253,
                    0.1275_3335,
                    0.1681_8509,
                    0.0027_9786,
                    0.389_6933,
                    0.2426_4945,
                    0.217_8971,
                    -0.0233_5227,
                    -0.0848_1959,
                    -0.1432_4117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
| 1
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
    '''Processor pairing a CLAP feature extractor with a Roberta tokenizer
    (duplicate copy of the class above in this dump): text goes through the
    tokenizer, audio through the feature extractor, and a combined call is
    meant to merge audio features into the text encoding.

    NOTE(review): identifiers look machine-mangled — ``__call__``'s
    parameters share a single name (a duplicate-argument SyntaxError) and
    each statement rebinds ``lowerCamelCase__`` while later lines read the
    never-bound names ``text`` / ``audios`` / ``kwargs`` / ``encoding`` /
    ``audio_features``.  Compare with the upstream ClapProcessor before use.
    '''
    lowerCAmelCase_ = """ClapFeatureExtractor"""
    lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
        '''Store the feature extractor and tokenizer via the processor mixin.'''
        # NOTE(review): duplicate parameter name — SyntaxError as written.
        super().__init__(__lowerCAmelCase , __lowerCAmelCase )
    def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
        '''Encode text and/or audio; at least one of the two must be given.'''
        lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )  # NOTE(review): ``kwargs`` is never bound here
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if audios is not None:
            lowerCamelCase__ = self.feature_extractor(
                __lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if text is not None and audios is not None:
            # NOTE(review): upstream assigns ``encoding["input_features"]`` here;
            # the mangled assignment below silently drops the audio features.
            lowerCamelCase__ = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        '''Delegate batch decoding to the tokenizer.'''
        return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        '''Delegate decoding to the tokenizer.'''
        return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
    @property
    def __lowerCamelCase ( self ):
        '''Union of tokenizer and feature-extractor model input names, deduplicated.'''
        lowerCamelCase__ = self.tokenizer.model_input_names
        lowerCamelCase__ = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29
|
import string
from math import logaa
def term_frequency(term: str, document: str) -> int:
    """Return how many times ``term`` occurs in ``document``
    (case-insensitive; punctuation and newlines stripped first).

    Fixes over the mangled original: the ``def`` duplicated the parameter
    name (a SyntaxError) and the body read the never-bound names
    ``document_without_punctuation`` / ``tokenize_document``.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return ``(documents containing term, total documents)`` where the
    documents are the newline-separated entries of ``corpus``
    (case-insensitive; punctuation stripped).

    Fixes over the mangled original: duplicated parameter names (a
    SyntaxError) and reads of the never-bound names
    ``corpus_without_punctuation`` / ``docs``.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = round(log10(n / df), 3); with ``smoothing`` enabled,
    return round(1 + log10(n / (1 + df)), 3).

    Args:
        df: number of documents containing the term.
        n: total number of documents.
        smoothing: use the smoothed idf variant (safe for ``df == 0``).

    Raises:
        ValueError: if ``n`` is 0 (log10(0) is undefined).
        ZeroDivisionError: if ``df`` is 0 and smoothing is off.
    """
    # NOTE(review): the module-level ``from math import logaa`` is a mangled
    # ``log10`` and would fail at import time; import locally instead.
    from math import log10

    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def tf_idf(tf: float, idf: float) -> float:
    """Return the tf-idf score (``tf * idf``) rounded to 3 decimal places.

    Fix over the mangled original: the ``def`` duplicated the parameter
    name (a SyntaxError) and shadowed the sibling functions' shared name.
    """
    return round(tf * idf, 3)
| 29
| 1
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
_a = logging.getLogger(__name__)
def main() -> None:
    """Pre-tokenize a text corpus once and dump it as a pickled list of
    token-id arrays (distillation preprocessing).

    Fixes over the mangled original: the function was named
    ``lowerCAmelCase__`` while the ``__main__`` guard below calls
    ``main()``; the return annotation referenced an unimported ``Tuple``;
    ``np.uintaa``/``np.intaa`` are not NumPy dtypes; ``iter`` shadowed the
    builtin; and every assignment rebound one throwaway local while later
    lines read never-bound names (``args``, ``tokenizer``, ``bos``/``sep``,
    ``data``, ``rslt`` ...).
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    # NOTE(review): the module-level logger was mangled to ``_a``; getLogger
    # is idempotent, so fetch the same logger locally.
    logger = logging.getLogger(__name__)

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        # NOTE(review): ``GPTaTokenizer`` matches this file's (mangled)
        # import; upstream this is GPT2Tokenizer.
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter_count = 0  # renamed from ``iter`` to avoid shadowing the builtin
    interval = 10000
    start = time.time()
    for text in data:
        # Special tokens are added manually, so disable the tokenizer's own.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_count += 1
        if iter_count % interval == 0:
            end = time.time()
            logger.info(f"{iter_count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # ids fit in 16 bits when the vocabulary is smaller than 2**16.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
| 29
|
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Public API of the Funnel package, keyed by submodule.
# BUG FIX: the original rebound one throwaway name (`_a`) for every structure,
# never defined `_import_structure` (so the _LazyModule call below raised
# NameError), and discarded every optional-backend export list.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Fast tokenizer is only exposed when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module
    # installed below resolves attributes on first access.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 1
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __A ( lowerCAmelCase ):
    """Configuration holder for EfficientFormer (``model_type`` "efficientformer").

    NOTE(review): the mangling pass gave every ``__init__`` parameter the
    same name ``__lowerCAmelCase`` (duplicate parameter names are a
    SyntaxError) and turned every ``self.<attr> = <arg>`` store into a
    rebinding of one local, so the right-hand-side names below
    (hidden_act, depths, dim, ...) are the only surviving record of the
    intended attribute names. Code left byte-identical; comments only.
    """
    lowerCAmelCase_ = """efficientformer"""
    def __init__( self , __lowerCAmelCase = [3, 2, 6, 4] , __lowerCAmelCase = [4_8, 9_6, 2_2_4, 4_4_8] , __lowerCAmelCase = [True, True, True, True] , __lowerCAmelCase = 4_4_8 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = 4 , __lowerCAmelCase = 7 , __lowerCAmelCase = 5 , __lowerCAmelCase = 8 , __lowerCAmelCase = 4 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 3 , __lowerCAmelCase = 3 , __lowerCAmelCase = 3 , __lowerCAmelCase = 2 , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 1E-5 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1E-12 , __lowerCAmelCase = 2_2_4 , __lowerCAmelCase = 1E-05 , **__lowerCAmelCase , ):
        """Store all architecture hyperparameters on the config instance."""
        super().__init__(**__lowerCAmelCase )
        # NOTE(review): each line below should be `self.<name> = <name>`;
        # as written the assigned values are discarded.
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = hidden_sizes
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = layer_norm_eps
        lowerCamelCase__ = patch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = depths
        lowerCamelCase__ = mlp_expansion_ratio
        lowerCamelCase__ = downsamples
        lowerCamelCase__ = dim
        lowerCamelCase__ = key_dim
        lowerCamelCase__ = attention_ratio
        lowerCamelCase__ = resolution
        lowerCamelCase__ = pool_size
        lowerCamelCase__ = downsample_patch_size
        lowerCamelCase__ = downsample_stride
        lowerCamelCase__ = downsample_pad
        lowerCamelCase__ = drop_path_rate
        lowerCamelCase__ = num_metaad_blocks
        lowerCamelCase__ = distillation
        lowerCamelCase__ = use_layer_scale
        lowerCamelCase__ = layer_scale_init_value
        lowerCamelCase__ = image_size
        lowerCamelCase__ = batch_norm_eps
| 29
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# Lightweight stand-in for the argparse namespace that datasets' TestCommand
# expects. `defaults` covers the last 8 fields, so only `dataset` must be
# supplied.
_a = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def lowerCAmelCase__(source ,target ) -> bool:
    """Return True when ``source`` differs from ``target`` by less than 1%.

    Fixes: the original declared both parameters with the same name
    ``__snake_case`` (a SyntaxError) although the body already read
    ``source`` and ``target``; the ``List[str]`` return annotation was wrong
    (the comparison yields a bool) and referenced a name not imported here.
    """
    return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def lowerCAmelCase__(__snake_case ):
    """Run datasets' TestCommand with --save_infos on a dataset script and
    verify the regenerated README/dataset_infos against expected values.

    Fixes: the final ``result == expected`` comparison was a bare statement
    whose value was discarded -- it is now asserted; the ``-> Tuple`` return
    annotation referenced a name not imported in this fragment and was
    dropped (the function returns None).
    NOTE(review): obfuscation collapsed several distinct arguments (dataset
    path, output dir, boolean flags) into the single parameter
    ``__snake_case`` and renamed the 1%-closeness helper above; restore
    those names before running.
    """
    lowerCamelCase__ = _TestCommandArgs(dataset=__snake_case ,all_configs=__snake_case ,save_infos=__snake_case )
    lowerCamelCase__ = TestCommand(*__snake_case )
    test_command.run()
    # The command must have written a README.md with the dataset infos.
    lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
    assert os.path.exists(__snake_case )
    lowerCamelCase__ = DatasetInfosDict.from_directory(__snake_case )
    lowerCamelCase__ = DatasetInfosDict(
        {
            '''default''': DatasetInfo(
                features=Features(
                    {
                        '''tokens''': Sequence(Value('''string''' ) ),
                        '''ner_tags''': Sequence(
                            ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
                        '''langs''': Sequence(Value('''string''' ) ),
                        '''spans''': Sequence(Value('''string''' ) ),
                    } ) ,splits=[
                    {
                        '''name''': '''train''',
                        '''num_bytes''': 2351563,
                        '''num_examples''': 10000,
                    },
                    {
                        '''name''': '''validation''',
                        '''num_bytes''': 238418,
                        '''num_examples''': 1000,
                    },
                ] ,download_size=3940680 ,dataset_size=2589981 ,)
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        lowerCamelCase__ , lowerCamelCase__ = getattr(dataset_infos['''default'''] ,__snake_case ), getattr(expected_dataset_infos['''default'''] ,__snake_case )
        if key == "num_bytes":
            # Byte counts may drift slightly between runs; allow 1% slack.
            assert is_apercent_close(__snake_case ,__snake_case )
        elif key == "splits":
            assert list(__snake_case ) == list(__snake_case )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
        else:
            # BUG FIX: previously a bare comparison whose result was discarded.
            assert result == expected
| 29
| 1
|
import os
def lowerCAmelCase__() -> int:
    """Project Euler 11: greatest product of four adjacent grid numbers.

    Reads the 20x20 grid from ``grid.txt`` located next to this module and
    scans rows, columns and both diagonals.

    Fixes: ``os.path.dirname`` was called on the undefined name
    ``__snake_case`` (now ``__file__``); the grid list, running maximum and
    candidate product were bound to one mangled local while the loops read
    ``l``/``maximum``/``temp``; the return annotation claimed a list though
    an int is returned.
    """
    with open(os.path.dirname(__file__) + '''/grid.txt''' ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1 (down-right)
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2 (down-left); j starts at 3 so j-3 stays in range
    for i in range(17 ):
        for j in range(3 ,20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


# Backward-compatible alias: the __main__ guard below calls `solution`.
solution = lowerCAmelCase__

if __name__ == "__main__":
    print(solution())
| 29
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __A :
    """Builds a tiny EsmConfig plus random tensors for the TF-ESM model tests.

    NOTE(review): the obfuscation pass renamed every method to
    ``__lowerCamelCase`` (later defs shadow earlier ones), every local /
    attribute store to ``lowerCamelCase__`` and most parameters to
    ``__lowerCAmelCase`` (duplicated parameter names are a SyntaxError), so
    this class cannot run until the original identifiers
    (prepare_config_and_inputs, create_and_check_model, self.batch_size,
    ...) are restored. Code left byte-identical; comments only.
    """
    def __init__( self , __lowerCAmelCase , ):
        # Tiny-model hyperparameters; NOTE(review): these rebind one mangled
        # local instead of assigning to ``self.<attr>``.
        lowerCamelCase__ = parent
        lowerCamelCase__ = 1_3
        lowerCamelCase__ = 7
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        lowerCamelCase__ = 9_9
        lowerCamelCase__ = 3_2
        lowerCamelCase__ = 2
        lowerCamelCase__ = 4
        lowerCamelCase__ = 3_7
        lowerCamelCase__ = '''gelu'''
        lowerCamelCase__ = 0.1
        lowerCamelCase__ = 0.1
        lowerCamelCase__ = 5_1_2
        lowerCamelCase__ = 1_6
        lowerCamelCase__ = 2
        lowerCamelCase__ = 0.02
        lowerCamelCase__ = 3
        lowerCamelCase__ = 4
        lowerCamelCase__ = None
    def __lowerCamelCase ( self ):
        """Create a tiny EsmConfig and random input tensors (ids, mask, labels)."""
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ = None
        if self.use_input_mask:
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowerCamelCase ( self ):
        """Same as above but also returns encoder hidden states / mask for decoder tests."""
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = self.prepare_config_and_inputs()
        lowerCamelCase__ = True
        lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Instantiate TFEsmModel and check the hidden-state shape for dict,
        list and positional call styles."""
        lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = [input_ids, input_mask]
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
        """Run TFEsmModel as a cross-attention decoder over encoder states."""
        lowerCamelCase__ = True
        lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
        lowerCamelCase__ = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        lowerCamelCase__ = model(__lowerCAmelCase )
        lowerCamelCase__ = [input_ids, input_mask]
        lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase )
        # Also check the case where encoder outputs are not passed
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Check TFEsmForMaskedLM logits have (batch, seq, vocab) shape."""
        lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase )
        lowerCamelCase__ = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Check TFEsmForTokenClassification logits have (batch, seq, num_labels) shape."""
        lowerCamelCase__ = self.num_labels
        lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase )
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __lowerCamelCase ( self ):
        """Return (config, inputs_dict) for the common-test mixin."""
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = config_and_inputs
        lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Common TF model tests specialized for ESM.

    NOTE(review): method names were mangled to ``__lowerCamelCase`` (they
    shadow each other) and the tester class name to ``__A``; the original
    setUp/test_* names must be restored for unittest discovery to work.
    Code left byte-identical; comments only.
    """
    lowerCAmelCase_ = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    lowerCAmelCase_ = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    def __lowerCamelCase ( self ):
        """setUp: build the model tester and a config tester."""
        lowerCamelCase__ = TFEsmModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
    def __lowerCamelCase ( self ):
        """Exercise the shared EsmConfig sanity checks."""
        self.config_tester.run_common_tests()
    def __lowerCamelCase ( self ):
        """Forward-pass shape test for the base model."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Decoder (cross-attention) variant of the forward-pass test."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Masked-LM head shape test."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        """Token-classification head shape test."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowerCamelCase ( self ):
        """Intentionally skipped: embedding resizing is unsupported for ESM."""
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowerCamelCase ( self ):
        """Intentionally skipped: embedding resizing is unsupported for ESM."""
        pass
    def __lowerCamelCase ( self ):
        """Check input-embedding layer type and the MaskedLM bias variables."""
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(__lowerCAmelCase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                lowerCamelCase__ = model.get_bias()
                assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
                for k, v in name.items():
                    assert isinstance(__lowerCAmelCase , tf.Variable )
            else:
                lowerCamelCase__ = model.get_output_embeddings()
                assert x is None
                lowerCamelCase__ = model.get_bias()
                assert name is None
@require_tf
class __A ( unittest.TestCase ):
    """Slow integration tests: compare real checkpoint outputs to golden values.

    NOTE(review): both test methods share the mangled name
    ``__lowerCamelCase`` (the second shadows the first); restore the
    original test_* names before running. Code left byte-identical.
    """
    @slow
    def __lowerCamelCase ( self ):
        """Masked-LM logits of facebook/esm2_t6_8M_UR50D on a toy sequence."""
        lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        lowerCamelCase__ = [1, 6, 3_3]
        self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase )
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
    @slow
    def __lowerCamelCase ( self ):
        """Base-model hidden states of facebook/esm2_t6_8M_UR50D."""
        lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
| 1
|
_a = 8.3_144_598
def lowerCAmelCase__(temperature ,molar_mass ) -> float:
    """Return the root-mean-square speed (m/s) of gas molecules.

    vrms = sqrt(3 * R * T / M), with R the universal gas constant,
    T the temperature in kelvin and M the molar mass in kg/mol.

    Fixes: the original declared both parameters with the same name
    ``__snake_case`` (a SyntaxError) although the body already read
    ``temperature`` and ``molar_mass``, and it referenced an undefined
    ``UNIVERSAL_GAS_CONSTANT`` (the module-level 8.3144598 was bound to a
    throwaway name that later code clobbered), so keep a local copy.
    """
    # Universal gas constant R in J/(mol*K).
    UNIVERSAL_GAS_CONSTANT = 8.3_144_598
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''' )
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # Example: RMS speed of nitrogen gas (molar mass 28) at 300 K.
    # BUG FIX: the demo rebound one throwaway name (`_a`) three times and
    # then read the undefined names `temperature`, `molar_mass` and
    # `rms_speed_of_molecule`; bind real locals and call the function
    # actually defined above.
    temperature = 300
    molar_mass = 28
    vrms = lowerCAmelCase__(temperature, molar_mass)
    print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 29
|
from math import sqrt
# NOTE(review): in the mangled source every function below was named
# `lowerCAmelCase__` (each def shadowing the previous) while sibling
# bodies called the real names (is_prime, prime_factorization, gcd,
# get_divisors, get_prime_numbers, is_even, ...), and two-argument
# functions declared duplicate parameter names (a SyntaxError), so the
# module could not run. Restored the intended names; `lowerCAmelCase__`
# stays bound (to the final definition, fib) for backward compatibility.
def is_prime(number) -> bool:
    """Return True when `number` (an int >= 0) is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Sieve of Eratosthenes: return every prime from 2 up to n (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """Return every prime from 2 up to n by trial division (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Return the prime factorization of `number` as a list (0 and 1 map to themselves)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Return the largest prime factor of `number` (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of `number` (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """Return True when `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number):
    """Return True when `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Return two primes whose sum equals the even `number` > 2 (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number_a, number_b):
    """Return the greatest common divisor of two non-negative ints (Euclid)."""
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a, int) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a


def kg_v(number_a, number_b):
    """Return the least common multiple (kgV) of two ints >= 1."""
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_a > 1 and number_b > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a)
        prime_fac_b = prime_factorization(number_b)
    elif number_a == 1 or number_b == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a, number_b)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Return the n-th prime (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_a, p_number_b):
    """Return all primes strictly between two primes p_number_a < p_number_b."""
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Return all positive divisors of n (n >= 1), including 1 and n."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """Return True when `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Return (numerator, denominator) reduced by their gcd (denominator != 0)."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """Return the n-th value of this Fibonacci variant (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_a = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_a
        fib_a = tmp
    return ans


# Backward-compatible binding: the mangled source left `lowerCAmelCase__`
# pointing at the last definition in this module.
lowerCAmelCase__ = fib
| 29
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_a = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = ["""pixel_values"""]
    def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 2_5_5 , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , **__lowerCAmelCase , ):
        """Initialise CLIP-style preprocessing options (resize, crop, rescale, normalize, RGB-convert).

        NOTE(review): every parameter carries the same mangled name
        ``__lowerCAmelCase`` (duplicate parameter names are a SyntaxError)
        while the body reads the original names (size, crop_size,
        do_resize, ...); restore the real parameter names before use.
        """
        super().__init__(**__lowerCAmelCase )
        # CLIP defaults: shortest edge 224 for resize, 224x224 centre crop.
        lowerCamelCase__ = size if size is not None else {'''shortest_edge''': 2_2_4}
        lowerCamelCase__ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
        lowerCamelCase__ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        lowerCamelCase__ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase , param_name='''crop_size''' )
        lowerCamelCase__ = do_resize
        lowerCamelCase__ = size
        lowerCamelCase__ = resample
        lowerCamelCase__ = do_center_crop
        lowerCamelCase__ = crop_size
        lowerCamelCase__ = do_rescale
        lowerCamelCase__ = rescale_factor
        lowerCamelCase__ = do_normalize
        lowerCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        lowerCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
        lowerCamelCase__ = do_convert_rgb
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Resize an image so its shortest edge equals ``size['shortest_edge']``,
        preserving aspect ratio.

        NOTE(review): parameters share the mangled name ``__lowerCAmelCase``
        (SyntaxError); the body expects (image, size, resample, data_format).
        """
        lowerCamelCase__ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
        # Fail fast when the caller passed a size dict without the required key.
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        lowerCamelCase__ = get_resize_output_image_size(__lowerCAmelCase , size=size['''shortest_edge'''] , default_to_square=__lowerCAmelCase )
        return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Centre-crop an image to ``(size['height'], size['width'])``.

        NOTE(review): mangled duplicate parameter names; the body expects
        (image, size, data_format).
        """
        lowerCamelCase__ = get_size_dict(__lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(__lowerCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
'''simple docstring'''
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
'''simple docstring'''
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ = size if size is not None else self.size
lowerCamelCase__ = get_size_dict(__lowerCAmelCase , param_name='''size''' , default_to_square=__lowerCAmelCase )
lowerCamelCase__ = resample if resample is not None else self.resample
lowerCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ = get_size_dict(__lowerCAmelCase , param_name='''crop_size''' , default_to_square=__lowerCAmelCase )
lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ = image_std if image_std is not None else self.image_std
lowerCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase__ = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase__ = [convert_to_rgb(__lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase__ = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase__ = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase__ = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase__ = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase__ = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
lowerCamelCase__ = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
lowerCamelCase__ = {'''pixel_values''': images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
| 29
|
from __future__ import annotations
def slowsort(sequence , start = None , end = None ) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)
    using the deliberately inefficient slowsort algorithm.

    Fixes to the obfuscated original: the three parameters shared one name
    (SyntaxError), the recursive calls targeted the then-undefined name
    ``slowsort`` (now the function's actual name), and the conditional swap
    assigned both values to throwaway locals so no swap ever happened.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        # Place the larger of the two pivots at the end.
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 29
| 1
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCAmelCase__(args ):
    """Factory wired into ``argparse`` defaults: build the convert command from
    the parsed CLI namespace.

    Fixes to the obfuscated original: the parameter was named ``__snake_case``
    while the body read ``args`` (NameError), and the ``-> List[str]`` return
    annotation referenced an unimported name (NameError at definition time).
    """
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
# Error message raised (as ImportError) by the conversion branches below when a
# TensorFlow -> PyTorch conversion is requested but TensorFlow is not installed.
_a = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand ( BaseTransformersCLICommand ):
    """CLI command that converts an original (mostly TensorFlow) checkpoint into a
    Transformers PyTorch checkpoint.

    NOTE(review): restored from an obfuscated version in which the class was named
    ``__A`` (while the factory above referenced ``ConvertCommand``), both methods
    shared the name ``__lowerCamelCase``, and the constructor never stored its
    arguments on ``self``. Digit-mangled import paths (``ta``/``gpta``) were
    restored to ``t5``/``gpt2`` — confirm against the package layout.
    """

    @staticmethod
    def register_subcommand( parser ):
        """Register the `convert` sub-command and its arguments on `parser`."""
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        # Route parsed args to the module-level factory defined above.
        train_parser.set_defaults(func=lowerCAmelCase__ )

    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args ):
        """Store the conversion parameters; `run()` performs the actual conversion."""
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(F'Loading model {model_type}' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run( self ):
        """Dispatch on the stored model type and run the matching conversion script.

        Raises:
            ImportError: when the conversion needs TensorFlow and it is missing
                (`_a` is the module-level error message defined above).
            ValueError: for an unsupported ``--model_type``.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(_a )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            # A ".ckpt"-style path is a TF checkpoint; anything else is treated
            # as a dataset file for the corpus-building variant of the script.
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ''''''
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ''''''
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint , self._config , self._pytorch_dump_output , tf_dataset_file )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )
            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )
            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 29
|
from __future__ import annotations
def lowerCAmelCase__(principal , daily_interest_rate , days_between_payments ) -> float:
    """Return the simple interest accrued on `principal` at `daily_interest_rate`
    over `days_between_payments` days.

    Fix to the obfuscated original: the three parameters shared one name
    (SyntaxError); names restored from the validation messages.

    Raises:
        ValueError: on non-positive `principal`/`days_between_payments` or a
            negative `daily_interest_rate`.
    """
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal , nominal_annual_interest_rate_percentage , number_of_compounding_periods ) -> float:
    """Return the compound interest earned on `principal` at the per-period rate
    over `number_of_compounding_periods` periods.

    Fixes to the obfuscated original: the three parameters shared one name
    (SyntaxError), and the function name is restored to ``compound_interest`` —
    the name under which the APR helper below already calls it.

    Raises:
        ValueError: on non-positive `principal`/`number_of_compounding_periods`
            or a negative rate.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def lowerCAmelCase__(principal , nominal_annual_percentage_rate , number_of_years ) -> float:
    """Return the interest earned on `principal` at an annual percentage rate
    compounded daily (365 periods/year) for `number_of_years` years.

    Fix to the obfuscated original: the three parameters shared one name
    (SyntaxError); names restored from the validation messages. Delegates to
    ``compound_interest`` defined above in this module.

    Raises:
        ValueError: on non-positive `principal`/`number_of_years` or a negative
            `nominal_annual_percentage_rate`.
    """
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
    # Execute the module's doctests when run directly.
    from doctest import testmod

    testmod()
| 29
| 1
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __A ( AbstractDatasetReader ):
    """Dataset reader that loads text files through the `Text` packaged builder.

    NOTE(review): restored from an obfuscated version whose ``__init__``
    parameters all shared one name (SyntaxError) and whose assignments never
    bound ``self.builder``. The base class is the imported
    ``AbstractDatasetReader``; ``read`` presumably implements its abstract
    method — confirm against that class.
    """

    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs ):
        """Configure the underlying `Text` builder for `path_or_paths`."""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        # Normalize a bare path (or list of paths) into a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )

    def read( self ):
        """Build and return the dataset (streaming or fully downloaded/prepared)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 29
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCAmelCase__(func ):
    """Decorator that makes `func` return its wall-clock duration in seconds
    instead of its result.

    Fix to the obfuscated original: the timing locals and the ``__name__``
    copy were all assigned to a throwaway name, so the wrapper raised
    NameError on first call.
    """
    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        func(*args , **kwargs )  # result is deliberately discarded; only timing matters
        delta = timeit.default_timer() - starttime
        return delta

    # Preserve the wrapped function's name for reporting.
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features , num_examples = 100 , seq_shapes = None ):
    """Generate `num_examples` random ``(index, example_dict)`` pairs matching the
    given `datasets` feature spec.

    Fixes to the obfuscated original: the parameters shared one name
    (SyntaxError), the locals ``dummy_data``/``example``/``data`` were never
    bound, and the function name is restored to ``generate_examples`` — the name
    under which the writer helper below already calls it.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for k, v in features.items():
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                # Unwrap nested Sequence features down to the inner value type.
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def lowerCAmelCase__(dataset_path , features , num_examples = 100 , seq_shapes = None ):
    """Write `num_examples` generated examples to an Arrow file at `dataset_path`
    and load it back as a `datasets.Dataset`.

    NOTE(review): parameter order (dataset_path, features) is assumed from the
    keyword usage below — TODO confirm against callers. Fixes to the obfuscated
    original: duplicate parameter names (SyntaxError) and the never-bound locals
    ``dummy_data``/``num_final_examples``/``dataset``.

    Raises:
        ValueError: if the writer finalized a different number of examples than
            requested.
    """
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
| 29
| 1
|
from __future__ import annotations
# Adjacency list of the demo graph. Renamed from the obfuscated `_a`: the
# __main__ block below constructs the BFS wrapper with `Graph(graph, "G")`.
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    """Graph wrapper that builds a breadth-first tree from a source vertex and
    reconstructs shortest (fewest-edge) paths from it.

    Fixes to the obfuscated original: the class is renamed back to ``Graph`` and
    the methods to ``breath_first_search``/``shortest_path`` (both names are
    used by the ``__main__`` block below and by the recursive call inside
    ``shortest_path``), and the instance attributes were never actually bound.
    """

    def __init__( self , graph , source_vertex ):
        """Store the adjacency mapping and the BFS source vertex."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search( self ):
        """Run BFS from the source vertex, filling ``self.parent``."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex ):
        """Return the path ``"S->...->target"``; raise ValueError if unreachable."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'->{target_vertex}'
if __name__ == "__main__":
    # Demo: BFS over the module-level `graph` from source "G", then print a few
    # shortest paths (the last call raises ValueError for an unknown vertex).
    # Fix to the obfuscated original: the instance was assigned to `_a` while the
    # following lines read `g` (NameError).
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 29
|
def lowerCAmelCase__(grid ) -> int:
    """Return the minimal path sum from the top-left to the bottom-right of
    `grid`, moving only right or down. `grid` is modified in place.

    Fixes to the obfuscated original: the row-threading locals were never
    bound, and the per-row accumulation (formerly a call to an undefined
    ``fill_row`` name) is inlined so the function is self-contained.

    Raises:
        TypeError: if `grid` is empty or has an empty first row.
    """
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    # Prefix-sum the first row: only rightward moves are possible there.
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        # Accumulate: each cell takes the cheaper of coming from the left or from above.
        current_row[0] += row_above[0]
        for cell_n in range(1 , len(current_row ) ):
            current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
        row_above = current_row
    return grid[-1][-1]
def fill_row(current_row , row_above ) -> list:
    """Accumulate minimal path sums into `current_row` (in place) given the
    already-accumulated `row_above`, and return it.

    Fixes to the obfuscated original: the two parameters shared one name
    (SyntaxError), and the function is renamed back to ``fill_row`` — the name
    under which the min-path function above already calls it.
    """
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        # Cheaper of arriving from the left neighbour or from the cell above.
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 29
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
    """Slow integration test for the TF XLM-RoBERTa base model output."""

    @slow
    def test_output_embeds_base_model( self ):
        """Run ``jplu/tf-xlm-roberta-base`` on "My dog is cute" and compare the
        last_hidden_state shape and a 3x3 value slice against reference values.

        NOTE(review): method renamed so unittest discovery (``test_*`` prefix)
        picks it up, and dtypes restored to ``tf.int32``/``tf.float32`` — the
        obfuscated ``tf.intaa``/``tf.floataa`` attributes do not exist on tf;
        confirm the intended widths. All locals in the original were bound to a
        throwaway name, leaving the later lines with undefined references.
        """
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
    """Arguments controlling which GLUE task to train on and how its inputs are
    tokenized and cached.

    NOTE(review): restored from an obfuscated version in which all four fields
    were named ``lowerCAmelCase_`` without annotations (so none of them were
    actual dataclass fields) and ``__post_init__`` had lost its name. Field
    names are grounded by ``self.task_name`` below and the ``args.data_dir`` /
    ``args.max_seq_length`` / ``args.overwrite_cache`` reads in the dataset
    class; the ``overwrite_cache`` default of ``False`` matches its boolean use.
    """

    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )

    def __post_init__( self ):
        """Normalize the task name to lowercase (GLUE processors are keyed lowercase)."""
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split names.

    Restored from an obfuscated version whose class and members had lost their
    names: the dataset class below references ``Split[mode]``, ``Split.dev``,
    ``Split.test`` and ``mode.value``, which grounds both the class name, the
    ``Enum`` base (imported at the top of this file) and the member names.
    """

    train = """train"""
    dev = """dev"""
    test = """test"""
class __A ( Dataset ):
    """PyTorch ``Dataset`` for GLUE tasks with on-disk caching of tokenized features.

    NOTE(review): deprecated upstream in favour of the 🤗 Datasets library (see the
    warning below). Restored from an obfuscated version whose ``__init__``
    parameters shared one name (SyntaxError) and whose assignments never bound
    ``self.processor`` / ``self.features`` / ``self.label_list`` etc.; parameter
    names are grounded by their direct uses in the body. ``logger`` is presumably
    the module-level logger — confirm against the header of this file.
    """

    # Obfuscated remnants of the original class-level annotations — kept as-is.
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42

    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , cache_dir = None ):
        """Load features from the cache when possible, otherwise tokenize and cache.

        Args:
            args: GLUE data arguments (task_name, data_dir, max_seq_length, overwrite_cache).
            tokenizer: pretrained tokenizer used to build the features.
            limit_length: optional cap on the number of examples.
            mode: a `Split` member or its name as a string.
            cache_dir: optional directory for the cached features file.

        Raises:
            KeyError: if `mode` is a string that is not a valid split name.
        """
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )

    def __len__( self ):
        """Number of cached feature records."""
        return len(self.features )

    def __getitem__( self , i ):
        """Return the feature record at index `i`."""
        return self.features[i]

    def get_labels( self ):
        """Return the (possibly reordered) label list for the task."""
        return self.label_list
| 29
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants. Restored from an obfuscated version in which
# all five bindings were named `_a` (each shadowing the previous) while the
# tokenizer class below references `VOCAB_FILES_NAMES`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `logger`. The names `SPIECE_UNDERLINE` / `FAIRSEQ_LANGUAGE_CODES` follow the
# sentencepiece/fairseq convention — confirm against the class body's usage.
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __A ( lowerCAmelCase ):
    """SentencePiece (BPE) tokenizer with fairseq-aligned vocabulary ids and
    per-language special tokens (MBart-style).

    NOTE(review): heavy obfuscation damage is left byte-identical in this block —
    every public method is defined as ``__lowerCamelCase`` (each later def shadows
    the earlier ones), most assignments bind a throwaway ``lowerCamelCase__`` name
    instead of the attribute the rest of the code reads (``self.sp_model``,
    ``self.fairseq_tokens_to_ids``, ``self.prefix_tokens`` ... are never set),
    the ``__init__`` parameters all share one name (a SyntaxError), and
    ``@src_lang.setter`` references a ``src_lang`` property that no longer exists
    under that name. The docstrings below describe the apparent intent of each
    method, to be confirmed once the names are restored.
    """

    lowerCAmelCase_ = VOCAB_FILES_NAMES
    lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
    lowerCAmelCase_ = []
    lowerCAmelCase_ = []

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
        """Load the SentencePiece model and build the fairseq-aligned id tables.

        NOTE(review): the body references ``mask_token``/``sp_model_kwargs``/
        ``vocab_file``/``additional_special_tokens``/``src_lang``/``tgt_lang`` —
        presumably the original parameter names, lost in obfuscation.
        """
        # Mask token is wrapped in AddedToken (lstrip) when given as a plain string.
        lowerCamelCase__ = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
        lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
        lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__lowerCAmelCase ) )
        lowerCamelCase__ = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        lowerCamelCase__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        lowerCamelCase__ = 1
        lowerCamelCase__ = len(self.sp_model )
        # Language codes are appended after the spm vocabulary (offset by fairseq_offset).
        lowerCamelCase__ = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowerCAmelCase )
        }
        lowerCamelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
        lowerCamelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        lowerCamelCase__ = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        lowerCamelCase__ = src_lang if src_lang is not None else '''en_XX'''
        lowerCamelCase__ = self.lang_code_to_id[self._src_lang]
        lowerCamelCase__ = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self ):
        """Pickle support: drop the live SentencePiece processor, keep its serialized proto."""
        lowerCamelCase__ = self.__dict__.copy()
        lowerCamelCase__ = None
        lowerCamelCase__ = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , __lowerCAmelCase ):
        """Unpickle support: rebuild the SentencePiece processor from the serialized proto."""
        lowerCamelCase__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            lowerCamelCase__ = {}
        lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    @property
    def __lowerCamelCase ( self ):
        """Vocabulary size: spm pieces + language codes + fairseq offset + mask token."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def __lowerCamelCase ( self ):
        """Current source-language code (presumably the original `src_lang` property)."""
        return self._src_lang

    @src_lang.setter
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Setter: change the source language and refresh the special tokens.

        NOTE(review): `src_lang` is not defined under that name above, so this
        decorator raises NameError at class-creation time in this obfuscated form.
        """
        lowerCamelCase__ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
        """Return a 0/1 special-tokens mask for one (or a pair of) sequence(s)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        lowerCamelCase__ = [1] * len(self.prefix_tokens )
        lowerCamelCase__ = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones
        return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        """Add the model's prefix/suffix special tokens around the input ids."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        """Return an all-zero token-type-id list sized for the assembled sequence(s)."""
        lowerCamelCase__ = [self.sep_token_id]
        lowerCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
        """Build translation model inputs, tagging them with the target-language id.

        Raises:
            ValueError: when either `src_lang` or `tgt_lang` is missing.
        """
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        lowerCamelCase__ = src_lang
        lowerCamelCase__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        lowerCamelCase__ = self.convert_tokens_to_ids(__lowerCAmelCase )
        lowerCamelCase__ = tgt_lang_id
        return inputs

    def __lowerCamelCase ( self ):
        """Return the full token->id vocabulary, including added tokens."""
        lowerCamelCase__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Tokenize text into SentencePiece pieces."""
        return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Convert a token string to its fairseq-aligned id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCamelCase__ = self.sp_model.PieceToId(__lowerCAmelCase )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Convert a fairseq-aligned id back to its token string."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Join pieces into a string, mapping the SentencePiece underline to a space."""
        lowerCamelCase__ = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
        return out_string

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        """Save the SentencePiece vocabulary file into `save_directory`; return its path."""
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCamelCase__ = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk to copy: dump the in-memory serialized model instead.
            with open(__lowerCAmelCase , '''wb''' ) as fi:
                lowerCamelCase__ = self.sp_model.serialized_model_proto()
                fi.write(__lowerCAmelCase )
        return (out_vocab_file,)

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = "en_XX" , __lowerCAmelCase = None , __lowerCAmelCase = "ro_RO" , **__lowerCAmelCase , ):
        """Prepare a seq2seq batch after recording the source/target languages."""
        lowerCamelCase__ = src_lang
        lowerCamelCase__ = tgt_lang
        return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Switch special tokens to the source-language configuration."""
        return self.set_src_lang_special_tokens(self.src_lang )

    def __lowerCamelCase ( self ):
        """Switch special tokens to the target-language configuration."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Set suffix tokens to [eos, src_lang_code]; no prefix (MBart source format)."""
        lowerCamelCase__ = self.lang_code_to_id[src_lang]
        lowerCamelCase__ = []
        lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Set suffix tokens to [eos, tgt_lang_code]; no prefix (MBart target format)."""
        lowerCamelCase__ = self.lang_code_to_id[lang]
        lowerCamelCase__ = []
        lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
| 29
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_a = datasets.logging.get_logger(__name__)
_a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    """Parse key/system CoNLL lines into the per-document cluster structures CoVal scores.

    Restored from an obfuscated copy: the duplicated parameter names were a
    SyntaxError and every distinct local had been collapsed onto one name.
    The call site in `evaluate` below already uses this function name.
    Returns {doc: (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)}.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, sys_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Run each (name, metric) pair over the parsed documents and add the CoNLL average.

    Restored from an obfuscated copy (duplicate parameter names were a
    SyntaxError); `_compute` below already calls this function by name with
    keyword arguments matching this signature.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        # The CoNLL score is the mean of the MUC, B-cubed and CEAFe F1 values.
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
        logger.info(
            name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,)
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F'CoNLL score: {conll:.2f}' )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def check_gold_parse_annotation(key_lines) -> bool:
    """Return True if the first annotated line carries a gold parse column.

    Scans non-comment lines; column 6 (index 5) holds the parse bit and "-"
    means "no parse". Stops at the first line with 6 or fewer columns.
    Fix: the obfuscated copy assigned the flag to a throwaway name but
    returned `has_gold_parse`, which was never bound (NameError).
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    """CoVal coreference metric: MUC, B-cubed, CEAFe, LEA, mentions and the CoNLL average.

    Restored from an obfuscated copy: `datasets.Metric` dispatches to methods
    named `_info` / `_compute`, and `_compute`'s duplicated parameter names
    were a SyntaxError. Parameter order follows the original default values
    (True, False, False, False) and the keyword call into `evaluate`.
    """

    def _info(self):
        """Declare metric metadata and the expected string-sequence features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' ) ),
                } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Score `predictions` against `references` (each a list of CoNLL-format lines per document)."""
        all_metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            # Minimum-span evaluation requires gold parse trees in the key file.
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=all_metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 29
| 1
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = "▁"
_a = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
_a = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
_a = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
_a = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
_a = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __A ( lowerCAmelCase ):
    """Sentencepiece-based Ernie-M tokenizer.

    Restored from an obfuscated copy: the six class attributes had all been
    assigned to one name (so only the last survived), several method
    signatures duplicated a single parameter name (a SyntaxError), and many
    call arguments had been replaced by the wrong variable. Names below follow
    the conventional PreTrainedTokenizer attribute/method protocol, which the
    body's own internal calls (`self.load_vocab`, `self.tokenize`) already use.
    NOTE(review): `self.SP_CHAR_MAPPING` is referenced but not defined in this
    chunk — presumably inherited or defined on the class elsewhere; confirm.
    """

    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # sp_model_kwargs must exist before super().__init__ so the base class
        # can serialize it with the tokenizer config.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(piece_id): piece_id for piece_id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        """Map each produced token to its (start, end) character span in the raw `text`."""
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = '''''', []
        # Normalize the raw text the same way sentencepiece sees it, remembering
        # which original character index each normalized character came from.
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize('''NFKC''', ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":  # drop the sentencepiece word-boundary marker
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        """Number of entries in the vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Vocabulary including tokens added after loading, as token -> id."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Apply the per-character SP_CHAR_MAPPING substitutions (identity for unmapped chars)."""
        # Fix: the obfuscated copy passed the whole text into .get() instead of
        # the loop character.
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Sentencepiece-tokenize `text`, then split pieces on CJK chars, punctuation and digit boundaries."""
        # Tokenizer-level sp_model_kwargs override the per-call defaults.
        if self.sp_model_kwargs.get('''enable_sampling''') is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('''alpha''') is not None:
            alpha = self.sp_model_kwargs.get('''alpha''')
        if self.sp_model_kwargs.get('''nbest_size''') is not None:
            nbest_size = self.sp_model_kwargs.get('''nbest_size''')
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == "▁":  # bare word-boundary marker ("▁" = SPIECE_UNDERLINE)
                if not pieces[pi + 1].startswith("▁") and pi != 0:
                    new_pieces.append(piece)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == "▁":
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    # CJK characters and punctuation become standalone pieces.
                    if i > lst_i and piece[lst_i:i] != "▁":
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    # Split at a letter->digit boundary.
                    if i > lst_i and piece[lst_i:i] != "▁":
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    # Split at a digit->letter boundary.
                    if i > lst_i and piece[lst_i:i] != "▁":
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        """Join pieces, turning the "▁" word-boundary marker into spaces."""
        # Fix: the obfuscated copy passed the tokens list as the replace pattern.
        out_string = "".join(tokens).replace("▁", " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        """Decode a list of ids directly to a plain string."""
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace("▁", " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """[CLS] A [SEP] for a single sequence; [CLS] A [SEP] [SEP] B [SEP] for a pair (Ernie-M format)."""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_a, offset_mapping_b=None):
        """Insert (0, 0) spans wherever special tokens are added by build_inputs_with_special_tokens."""
        if offset_mapping_b is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 at sequence-token positions."""
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_b)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b = None):
        """Segment ids: zeros for the first segment, ones for the second."""
        if token_ids_b is None:
            # [CLS] X [SEP]
            return (len(token_ids_a) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_a) + 1) + [1] * (len(token_ids_b) + 3)

    def is_ch_char(self, char):
        """True for characters in the CJK Unified Ideographs block."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """True for ASCII letters only."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """True for the fixed set of ASCII/CJK punctuation this tokenizer splits on."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """True for ASCII whitespace or any single character in Unicode category Zs."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        """Read a newline-separated vocab file; line number becomes the token id."""
        token_to_idx = {}
        with io.open(filepath, '''r''', encoding='''utf-8''') as f:
            for index, line in enumerate(f):
                token = line.rstrip('''\n''')
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocab (one token per line, ordered by id) and the sentencepiece model.

        Fix: the obfuscated copy's sort key was `lambda __lowerCAmelCase: kv[1]`,
        a NameError; the key must read the (token, id) pair it is given.
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file, '''w''', encoding='''utf-8''') as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''')
                index += 1
        tokenizer_model_file = os.path.join(save_directory, '''sentencepiece.bpe.model''')
        with open(tokenizer_model_file, '''wb''') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 29
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_a = open # noqa: we just need to have a builtin inside this module to test it properly
| 29
| 1
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_a = logging.get_logger(__name__)
_a = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
    """Command-line arguments controlling SQuAD data loading and feature conversion.

    NOTE(review): this is an obfuscated copy — every field was renamed to the
    same identifier `lowerCAmelCase_`, so at runtime only the LAST assignment
    survives and the dataclass has a single field. The metadata strings still
    identify the intended fields (model_type, data_dir, max_seq_length,
    doc_stride, max_query_length, max_answer_length, overwrite_cache,
    version_2_with_negative, null_score_diff_threshold, n_best_size, lang_id,
    threads); the original names need to be restored for the dataset class
    below (which reads e.g. `args.max_seq_length`) to work. `lowerCAmelCase`
    used as a default presumably stands for `None`/`False` sentinels —
    TODO confirm against the upstream source.
    """
    # Intended: model_type (choice restricted to the auto-model mapping).
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase )} )
    # Intended: data_dir containing the SQuAD .json files.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    # Intended: max_seq_length.
    lowerCAmelCase_ = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # Intended: doc_stride.
    lowerCAmelCase_ = field(
        default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    # Intended: max_query_length.
    lowerCAmelCase_ = field(
        default=64 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    # Intended: max_answer_length.
    lowerCAmelCase_ = field(
        default=30 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    # Intended: overwrite_cache flag.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    # Intended: version_2_with_negative flag (SQuAD v2 unanswerable questions).
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    # Intended: null_score_diff_threshold.
    lowerCAmelCase_ = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    # Intended: n_best_size (help string looks copy-pasted from the field above).
    lowerCAmelCase_ = field(
        default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    # Intended: lang_id for language-specific XLM models.
    lowerCAmelCase_ = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    # Intended: threads used for example-to-feature conversion.
    lowerCAmelCase_ = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class __A ( lowerCAmelCase ):
    """Dataset split selector with values "train" and "dev" (enum-style).

    NOTE(review): obfuscation collapsed both members onto one name, so only
    the second assignment survives; upstream these are `train = "train"` and
    `dev = "dev"` — the dataset below compares against `Split.train` and
    `Split.dev`.
    """
    # Intended: train = "train"
    lowerCAmelCase_ = """train"""
    # Intended: dev = "dev"
    lowerCAmelCase_ = """dev"""
class __A ( lowerCAmelCase ):
    """Torch Dataset over SQuAD features, with on-disk caching of converted examples.

    NOTE(review): obfuscated copy. `__init__` repeats one parameter name seven
    times (a SyntaxError as written) while the body references the intended
    names (`args`, `tokenizer`, `mode`, `cache_dir`, ...); locals are likewise
    collapsed onto `lowerCamelCase__` so e.g. `self.features`/`self.dataset`
    are read but never visibly assigned. Kept byte-identical because the
    caching/FileLock/torch.load flow cannot be reconstructed here with
    confidence — restore from the upstream `SquadDataset` implementation.
    """
    # Intended class-level annotations (obfuscated to `42`): args, features,
    # mode, is_language_sensitive — TODO confirm against upstream.
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = "pt" , ):
        """Load features from the cache file if present, else convert examples and cache them."""
        lowerCamelCase__ = args
        lowerCamelCase__ = is_language_sensitive
        # Pick the SQuAD v2 processor when unanswerable questions are enabled
        # (obfuscation merged SquadV1Processor/SquadV2Processor into one name).
        lowerCamelCase__ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            try:
                lowerCamelCase__ = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        lowerCamelCase__ = mode
        # Load data features from cache or dataset file
        lowerCamelCase__ = '''v2''' if args.version_2_with_negative else '''v1'''
        lowerCamelCase__ = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCamelCase__ = cached_features_file + '''.lock'''
        with FileLock(__lowerCAmelCase ):
            if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
                lowerCamelCase__ = time.time()
                lowerCamelCase__ = torch.load(__lowerCAmelCase )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                lowerCamelCase__ = self.old_features['''features''']
                lowerCamelCase__ = self.old_features.get('''dataset''' , __lowerCAmelCase )
                lowerCamelCase__ = self.old_features.get('''examples''' , __lowerCAmelCase )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        ''' future run''' )
            else:
                if mode == Split.dev:
                    lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
                else:
                    lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
                lowerCamelCase__ , lowerCamelCase__ = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=__lowerCAmelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCAmelCase , )
                lowerCamelCase__ = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , __lowerCAmelCase , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
    def __len__( self ):
        """Number of converted features."""
        return len(self.features )
    def __getitem__( self , __lowerCAmelCase ):
        """Build the model-input dict (tensors) for feature `i`, adding fields per model type / split."""
        lowerCamelCase__ = self.features[i]
        lowerCamelCase__ = torch.tensor(feature.input_ids , dtype=torch.long )
        lowerCamelCase__ = torch.tensor(feature.attention_mask , dtype=torch.long )
        lowerCamelCase__ = torch.tensor(feature.token_type_ids , dtype=torch.long )
        lowerCamelCase__ = torch.tensor(feature.cls_index , dtype=torch.long )
        lowerCamelCase__ = torch.tensor(feature.p_mask , dtype=torch.float )
        lowerCamelCase__ = torch.tensor(feature.is_impossible , dtype=torch.float )
        lowerCamelCase__ = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }
        # These model families do not use token_type_ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'''is_impossible''': is_impossible} )
        if self.is_language_sensitive:
            # NOTE(review): `torch.intaa` is digit-obfuscated — presumably torch.int64.
            inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        if self.mode == Split.train:
            # Gold answer spans are only available (and needed) for training.
            lowerCamelCase__ = torch.tensor(feature.start_position , dtype=torch.long )
            lowerCamelCase__ = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
        return inputs
| 29
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_a = logging.get_logger(__name__)
class __A :
'''simple docstring'''
lowerCAmelCase_ = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a nested map over `iterable` to either the local multiprocessing
    pool (default) or the configured joblib backend.

    NOTE(review): restored from an obfuscated copy whose seven identical
    parameter names were a SyntaxError; the two dispatch targets were already
    called by their real names.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def lowerCAmelCase__(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Run `single_map_nested_func` over contiguous slices of `iterable` in a
    multiprocessing Pool and return the flattened results in input order.

    Fixes over the original: the seven parameters all shared the name
    `__snake_case` (SyntaxError) and local assignment targets were mangled to
    `lowerCamelCase__` while later lines read the real names (`num_proc`,
    `split_kwds`, `mapped`, ...); names restored from their uses below. The
    wrong `-> int` annotation was dropped (the function returns a list).
    """
    # Never spawn more workers than there are items.
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` slices get one extra item each.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
    # Sanity check: the slices must exactly cover the input.
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f'Error dividing inputs iterable among processes. '
            f'Total number of objects {len(iterable)}, '
            f'length: {sum(len(i[1]) for i in split_kwds)}')
    logger.info(
        f'Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}')
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock so per-process progress bars don't garble output.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f'Finished {num_proc} processes')
    # Flatten the per-process result lists.
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f'Unpacked {len(mapped)} objects')
    return mapped
def lowerCAmelCase__(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map with the joblib backend registered in `ParallelBackendConfig`.

    `num_proc` becomes joblib's `n_jobs`. Progress display is left to joblib,
    hence the hard-coded `(..., None, True, None)` (rank, disable_tqdm, desc)
    slots in each delayed call.

    Fixes over the original: duplicate `__snake_case` parameter names
    (SyntaxError) restored from the upstream `datasets` utility; the bogus
    `-> List[str]` annotation was dropped (`List` is never imported here).
    """
    # joblib is an optional dependency, so import lazily.
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable)
@experimental
@contextlib.contextmanager
def lowerCAmelCase__(backend_name):
    """Context manager that routes the parallel map above through a joblib
    backend for the duration of the `with` block.

    Fixes over the original: the parameter was mangled to `__snake_case` while
    the body read `backend_name` (NameError), and both assignments targeted a
    useless local `lowerCamelCase__` instead of
    `ParallelBackendConfig.backend_name` — the attribute the dispatcher above
    actually reads. The wrong `-> int` annotation was dropped.
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # Always restore the default (multiprocessing) backend.
        ParallelBackendConfig.backend_name = None
| 29
| 1
|
import enum
import shutil
import sys
_a , _a = shutil.get_terminal_size()
_a = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class __A ( enum.Enum ):
    '''Cursor movement direction for the interactive menu (upstream: `Direction`).'''

    # NOTE(review): member names are mangled — upstream these are UP = 0 and
    # DOWN = 1. As written, two Enum members sharing one name raise a
    # TypeError ("Attempted to reuse key") at class creation time.
    lowerCAmelCase_ = 0
    lowerCAmelCase_ = 1
def lowerCAmelCase__(__snake_case ,__snake_case="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(__snake_case ) + end )
sys.stdout.flush()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case="" ) -> List[Any]:
'''simple docstring'''
forceWrite(F'\u001b[{color}m{content}\u001b[0m' ,__snake_case )
def lowerCAmelCase__() -> int:
    '''Return the cursor to the start of the current line (carriage return).

    NOTE(review): the `-> int` annotation is wrong (the function returns None),
    and `forceWrite` is defined above under a mangled name — both inherited
    from the file's mangling.
    '''
    forceWrite('''\r''' )
def lowerCAmelCase__(num_lines, direction):
    """Move the terminal cursor `num_lines` steps in `direction`
    ("UP"/"DOWN"/"LEFT"/"RIGHT", case-insensitive) via an ANSI escape.

    Fixes over the original: both parameters were named `__snake_case`
    (SyntaxError) while the body read `num_lines` / `direction`; names restored
    from the body. The unresolvable return annotation was dropped.

    NOTE(review): `forceWrite` and `CURSOR_TO_CHAR` are module siblings defined
    above under mangled names.
    """
    forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')
def lowerCAmelCase__() -> List[Any]:
    '''Blank out the current terminal line by overwriting it with spaces, then
    return the cursor to column 0.

    NOTE(review): the annotation is wrong (returns None), and `TERMINAL_WIDTH`,
    `forceWrite` and `reset_cursor` are module siblings whose definitions were
    mangled — all three reads fail as the module is currently written.
    '''
    forceWrite(''' ''' * TERMINAL_WIDTH )
    reset_cursor()
def lowerCAmelCase__() -> int:
    '''Draw a full-width horizontal rule of dashes on the current line.

    NOTE(review): the annotation is wrong (returns None); `reset_cursor`,
    `forceWrite` and `TERMINAL_WIDTH` are mangled module siblings.
    '''
    reset_cursor()
    forceWrite('''-''' * TERMINAL_WIDTH )
| 29
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __A ( lowerCAmelCase ):
    '''Output container for the temporal transformer below (upstream:
    `TransformerTemporalModelOutput`, a `BaseOutput` subclass).'''

    # NOTE(review): mangled field — upstream this is `sample: torch.FloatTensor`,
    # the hidden states after the temporal transformer; `42` is mangling junk.
    lowerCAmelCase_ = 42
class __A ( lowerCAmelCase , lowerCAmelCase ):
    """A transformer applied over the temporal (frame) axis of video latents
    (upstream: `TransformerTemporalModel`, a `ModelMixin` + `ConfigMixin`).

    Fixes over the original: every parameter of `__init__` and of the forward
    method shared the mangled name `__lowerCAmelCase` (duplicate parameter
    names are a SyntaxError), and local assignment targets were mangled to
    `lowerCamelCase__`. Names, defaults and their order are restored from the
    keyword arguments visible in the original body (`dropout=`,
    `cross_attention_dim=`, `activation_fn=`, ...) and the upstream diffusers
    signature; the external interface (positions, defaults) is unchanged.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads = 1_6,
        attention_head_dim = 8_8,
        in_channels = None,
        out_channels = None,
        num_layers = 1,
        dropout = 0.0,
        norm_num_groups = 3_2,
        cross_attention_dim = None,
        attention_bias = False,
        sample_size = None,
        activation_fn = "geglu",
        norm_elementwise_affine = True,
        double_self_attention = True,
    ):
        """Build norm -> proj_in -> transformer blocks -> proj_out; all
        arguments are recorded on the config by `register_to_config`."""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        # GroupNorm over channels before projecting into the attention width.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1E-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    # NOTE(review): upstream this method is named `forward`; the mangled name
    # is kept so existing (equally mangled) callers keep working.
    def __lowerCamelCase (
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict = True,
    ):
        """Apply attention over the frame axis and add the input back as a
        residual.

        `hidden_states` is expected as (batch * num_frames, channel, height,
        width); attention runs over `num_frames` independently per spatial
        position. Returns a one-tuple when `return_dict` is False, otherwise a
        `TransformerTemporalModelOutput`.
        """
        # assumes batch_frames is divisible by num_frames — TODO confirm caller contract
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        # (b*f, c, h, w) -> (b, f, c, h, w) -> (b, c, f, h, w) for GroupNorm.
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        # Fold spatial positions into the batch: (b*h*w, f, c).
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        # (b*h*w, f, c) -> (b, h, w, f, c) -> (b, f, c, h, w) -> (b*f, c, h, w)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
| 29
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __A ( unittest.TestCase ):
    '''Tests for the BLIP-2 processor, which pairs an image processor with a
    GPT-2 tokenizer (upstream: `Blip2ProcessorTest`).

    NOTE(review): this file's mangling renamed `Blip2Processor` ->
    `BlipaProcessor`, `GPT2Tokenizer` -> `GPTaTokenizer`, `np.uint8` ->
    `np.uinta`, and collapsed local assignment targets to `lowerCamelCase__`,
    so several statements below read names that were never bound (`processor`,
    `tokenizer`, `image_processor`, `image_inputs`, ...). Restore from the
    upstream transformers test before running.
    '''

    def __lowerCamelCase ( self ):
        '''setUp: save a processor built from a tiny tokenizer and a default
        BLIP image processor into a fresh temp dir.'''
        lowerCamelCase__ = tempfile.mkdtemp()
        lowerCamelCase__ = BlipImageProcessor()
        lowerCamelCase__ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        lowerCamelCase__ = BlipaProcessor(__lowerCAmelCase , __lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )

    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        '''Reload only the tokenizer component from the temp dir.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer

    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        '''Reload only the image-processor component from the temp dir.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor

    def __lowerCamelCase ( self ):
        '''tearDown: remove the temp dir created in setUp.'''
        shutil.rmtree(self.tmpdirname )

    def __lowerCamelCase ( self ):
        '''Create a single random 30x400 RGB PIL image as test input.'''
        lowerCamelCase__ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        lowerCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __lowerCamelCase ( self ):
        '''Saving then reloading with overridden kwargs must apply the overrides
        to both tokenizer and image processor.'''
        lowerCamelCase__ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        lowerCamelCase__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        lowerCamelCase__ = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )

    def __lowerCamelCase ( self ):
        '''Processor image output must match the bare image processor's output.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        lowerCamelCase__ = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def __lowerCamelCase ( self ):
        '''Processor text output must match the bare tokenizer's output.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = processor(text=__lowerCAmelCase )
        lowerCamelCase__ = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def __lowerCamelCase ( self ):
        '''Text+image call must produce the three expected keys; calling the
        processor with no input must raise.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowerCAmelCase ):
            processor()

    def __lowerCamelCase ( self ):
        '''batch_decode must delegate to the tokenizer's batch_decode.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCamelCase__ = processor.batch_decode(__lowerCAmelCase )
        lowerCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def __lowerCamelCase ( self ):
        '''Model input names are fixed to pixel_values/input_ids/attention_mask.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        lowerCamelCase__ = '''lower newer'''
        lowerCamelCase__ = self.prepare_image_inputs()
        lowerCamelCase__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 29
|
# Korean (ko) documentation-notebook boilerplate: the "how to install
# Transformers" code cell injected at the top of generated notebooks.
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# NOTE(review): upstream names are INSTALL_CONTENT / notebook_first_cells /
# black_avoid_patterns; here every binding collapses to `_a`, so the
# `INSTALL_CONTENT` read on the next line is an unbound name.
_a = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder substitutions used when rendering documentation examples.
_a = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 29
| 1
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A :
    '''Builds a tiny `LayoutLMv3Config` plus random text/bbox/image inputs and
    asserts output shapes for each model head (upstream:
    `LayoutLMv3ModelTester`).

    NOTE(review): this class inherits the file-wide mangling — `__init__` and
    the `create_and_check_*` methods declare every parameter as
    `__lowerCAmelCase` (duplicate parameter names are a SyntaxError) and bind
    locals to `lowerCamelCase__` while later statements read the upstream
    names (`batch_size`, `bbox`, `result`, `config_and_inputs`, ...). Restore
    the upstream names before use.
    '''

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=2 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_6 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=6 , __lowerCAmelCase=6 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=1_0_0_0 , ):
        '''Record the tiny hyper-parameters and derive the combined sequence
        length (text tokens + image patches + 1 CLS token).'''
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = image_size
        lowerCamelCase__ = patch_size
        lowerCamelCase__ = text_seq_length
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_input_mask
        lowerCamelCase__ = use_token_type_ids
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = coordinate_size
        lowerCamelCase__ = shape_size
        lowerCamelCase__ = num_labels
        lowerCamelCase__ = num_choices
        lowerCamelCase__ = scope
        lowerCamelCase__ = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        lowerCamelCase__ = text_seq_length
        lowerCamelCase__ = (image_size // patch_size) ** 2 + 1
        lowerCamelCase__ = self.text_seq_length + self.image_seq_length

    def __lowerCamelCase ( self ):
        '''Create random input ids, legal bounding boxes, pixel values and
        (optionally) masks/labels, plus the matching tiny config.'''
        lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so x0 <= x1 and y0 <= y1.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    lowerCamelCase__ = bbox[i, j, 3]
                    lowerCamelCase__ = bbox[i, j, 1]
                    lowerCamelCase__ = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    lowerCamelCase__ = bbox[i, j, 2]
                    lowerCamelCase__ = bbox[i, j, 0]
                    lowerCamelCase__ = t
        lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ = None
        if self.use_input_mask:
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.text_seq_length] )
        lowerCamelCase__ = None
        if self.use_token_type_ids:
            lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        lowerCamelCase__ = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Run the base model with text+image, text-only and image-only inputs
        and check the last-hidden-state shape for each combination.'''
        lowerCamelCase__ = LayoutLMvaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        # text + image
        lowerCamelCase__ = model(__lowerCAmelCase , pixel_values=__lowerCAmelCase )
        lowerCamelCase__ = model(
            __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        lowerCamelCase__ = model(pixel_values=__lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the sequence-classification head's logits shape.'''
        lowerCamelCase__ = self.num_labels
        lowerCamelCase__ = LayoutLMvaForSequenceClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        lowerCamelCase__ = model(
            __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the token-classification head's logits shape.'''
        lowerCamelCase__ = self.num_labels
        lowerCamelCase__ = LayoutLMvaForTokenClassification(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        lowerCamelCase__ = model(
            __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Check the question-answering head's start/end logits shapes.'''
        lowerCamelCase__ = LayoutLMvaForQuestionAnswering(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        lowerCamelCase__ = model(
            __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __lowerCamelCase ( self ):
        '''Repackage prepare_config_and_inputs() into the (config, inputs_dict)
        pair the common model tests expect.'''
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = config_and_inputs
        lowerCamelCase__ = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    '''Common-suite tests for the LayoutLMv3 model family (upstream:
    `LayoutLMv3ModelTest`).

    NOTE(review): mangled like the rest of the file — the three `False` class
    attributes below have lost their upstream names, and `_prepare_for_class`
    branches on `get_values(__lowerCAmelCase)` where the upstream code passes
    concrete `MODEL_FOR_*_MAPPING` constants; several methods also declare
    duplicate `__lowerCAmelCase` parameters (a SyntaxError).
    '''

    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    # All LayoutLMv3 model classes exercised by the common tests.
    lowerCAmelCase_ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline tests.
    lowerCAmelCase_ = (
        {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        '''Pipeline-compat hook: never skip any pipeline test for this model.'''
        return True

    def __lowerCamelCase ( self ):
        '''setUp: instantiate the model tester and config tester.'''
        lowerCamelCase__ = LayoutLMvaModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
        '''Augment `inputs_dict` (multiple-choice tiling, dummy labels) for the
        given model class before running a common test.'''
        lowerCamelCase__ = copy.deepcopy(__lowerCAmelCase )
        if model_class in get_values(__lowerCAmelCase ):
            # Tile every >1-D tensor across the choice dimension.
            lowerCamelCase__ = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(__lowerCAmelCase , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(__lowerCAmelCase ):
                lowerCamelCase__ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
            elif model_class in get_values(__lowerCAmelCase ):
                # QA heads need start and end positions.
                lowerCamelCase__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
                lowerCamelCase__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
            elif model_class in [
                *get_values(__lowerCAmelCase ),
            ]:
                lowerCamelCase__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
            elif model_class in [
                *get_values(__lowerCAmelCase ),
            ]:
                lowerCamelCase__ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__lowerCAmelCase , )
        return inputs_dict

    def __lowerCamelCase ( self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        '''Base model forward/shape test.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        '''Base model test across the three position-embedding types.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase__ = type
            self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        '''Sequence-classification head test.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        '''Token-classification head test.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        '''Question-answering head test.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )

    @slow
    def __lowerCamelCase ( self ):
        '''Smoke-test loading the first pretrained checkpoint from the hub.'''
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = LayoutLMvaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def lowerCAmelCase__():
    """Load the standard COCO two-cats fixture image used by the integration
    tests below.

    Fixes over the original: the assignment target was mangled to
    `lowerCamelCase__` while the return statement read `image` (NameError);
    the `-> Optional[Any]` annotation was dropped because `Optional`/`Any` are
    never imported in this snippet.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
class __A ( unittest.TestCase ):
    '''Integration test: run microsoft/layoutlmv3-base on a fixture image and
    compare against reference logits (upstream: `LayoutLMv3ModelIntegrationTest`).

    NOTE(review): `prepare_img` is the fixture loader defined above under a
    mangled name, and `__lowerCAmelCase` appears where upstream passes concrete
    values (e.g. `apply_ocr=False`, `torch_device`); locals such as `model`,
    `input_ids`, `bbox`, `outputs` are read but never bound as written.
    '''

    @cached_property
    def __lowerCamelCase ( self ):
        '''Image processor with OCR disabled (bboxes are supplied manually).'''
        return LayoutLMvaImageProcessor(apply_ocr=__lowerCAmelCase ) if is_vision_available() else None

    @slow
    def __lowerCamelCase ( self ):
        '''Forward pass on the COCO fixture; verify the output shape and a 3x3
        slice of the last hidden state against hard-coded reference values.'''
        lowerCamelCase__ = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(__lowerCAmelCase )
        lowerCamelCase__ = self.default_image_processor
        lowerCamelCase__ = prepare_img()
        lowerCamelCase__ = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).pixel_values.to(__lowerCAmelCase )
        lowerCamelCase__ = torch.tensor([[1, 2]] )
        lowerCamelCase__ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        lowerCamelCase__ = model(
            input_ids=input_ids.to(__lowerCAmelCase ) , bbox=bbox.to(__lowerCAmelCase ) , pixel_values=pixel_values.to(__lowerCAmelCase ) , )
        # verify the logits
        lowerCamelCase__ = torch.Size((1, 1_9_9, 7_6_8) )
        self.assertEqual(outputs.last_hidden_state.shape , __lowerCAmelCase )
        lowerCamelCase__ = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 29
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

# Lazy import structure for the GPT-Neo family: configuration symbols are
# always importable; modeling symbols depend on torch / flax availability.
# NOTE(review): upstream this dict is `_import_structure` and each branch below
# extends it in place; here every assignment rebinds `_a`, so the torch/flax
# symbol lists are discarded and the `_import_structure` read at the bottom is
# an unbound name.
_a = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration symbols.
    pass
else:
    _a = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # flax missing: skip the Flax symbols.
    pass
else:
    _a = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports instead of the lazy module.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys

    # NOTE(review): upstream assigns `sys.modules[__name__] = _LazyModule(...)`;
    # binding the lazy module to `_a` leaves this module un-lazified.
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 1
|
# Author credit (upstream: `__author__ = "Alexander Joslin"`).
_a = "Alexander Joslin"
import operator as op

# Project-local LIFO stack used by the two-stack evaluator below.
from .stack import Stack
def lowerCAmelCase__(equation) -> int:
    """Evaluate a fully-parenthesized, single-digit infix expression with
    Dijkstra's two-stack algorithm.

    RULE 1: push digits on the operand stack; RULE 2: push operators;
    RULE 4: on ')', pop one operator and two operands, apply, push the result;
    RULE 5: the final answer is the operand left on the stack. '(' characters
    are deliberately ignored, and only single-character tokens are handled —
    multi-digit numbers would be pushed digit by digit.

    Fixes over the original: the parameter was mangled to `__snake_case` while
    the loop read `equation`, and every assignment target was mangled to
    `lowerCamelCase__` while later lines read `opr` etc. (NameErrors). The
    operand order is restored as (num2, num1) so that `-` and `/` apply the
    later-pushed operand (`num1`, the top of the stack) on the right-hand side.
    """
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo: evaluate a fully-parenthesized expression (expected result: 45).
    # NOTE(review): as written this fails — `dijkstras_two_stack_algorithm` is
    # the function above (defined under a mangled name) and the constant was
    # bound to `_a`, not `equation`; both reads are NameErrors.
    _a = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 29
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    """Deprecated alias kept for backward compatibility (upstream:
    `OwlViTFeatureExtractor`); use `OwlViTImageProcessor` instead.

    Fixes over the original: `*__lowerCAmelCase, **__lowerCAmelCase` duplicated
    a parameter name (a SyntaxError), and the warning-category argument was
    mangled to an unbound name — restored to the transformers deprecation
    convention (`FutureWarning`).
    """

    def __init__(self, *args, **kwargs):
        # Emit the standard deprecation warning, then behave exactly like the
        # replacement image processor.
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29
| 1
|
# Minimal DreamBooth inference script: load a fine-tuned Stable Diffusion
# checkpoint and sample one image of the learned subject ("sks dog").
import torch
from diffusers import StableDiffusionPipeline

_a = "path-to-your-trained-model"
# NOTE(review): bindings mangled — upstream names are model_id / pipe /
# prompt / image, which the lines below read but which are never bound here;
# `torch.floataa` is a mangled `torch.float16` (an AttributeError as written).
_a = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
_a = "A photo of sks dog in a bucket"
_a = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 29
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCAmelCase__(model_card_dir, src_lang, tgt_lang, model_name):
    """Render and write the README.md model card for one allenai WMT16 FSMT
    model into `model_card_dir` (a `pathlib.Path`).

    Fixes over the original: the four parameters all shared the name
    `__snake_case` (a SyntaxError) — names restored from the keyword call at
    the bottom of this file — and local assignment targets (`texts`, `scores`,
    `pair`, `readme`, `path`) are restored from their uses inside the card
    template. The mangled `mkdir(parents=__snake_case, exist_ok=__snake_case)`
    flags are restored to `True` (create parent dirs, tolerate existing ones).
    The unresolvable `-> Any` annotation was dropped.
    """
    # Sample sentences shown in the usage example, keyed by language code.
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
        '''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
        '''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
    }
    pair = F'{src_lang}-{tgt_lang}'
    readme = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n    year={{2020}},\n    eprint={{2006.10369}},\n    archivePrefix={{arXiv}},\n    primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''')
    print(F'Generating {path}')
    with open(path, '''w''', encoding='''utf-8''') as f:
        f.write(readme)
# make sure we are under the root of the project
_a = Path(__file__).resolve().parent.parent.parent
# NOTE(review): targets mangled — upstream names are repo_dir,
# model_cards_dir and model_card_dir; the reads below use those names,
# which are never bound as written.
_a = repo_dir / "model_cards"
# Generate one card per allenai WMT16 en-de checkpoint.
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    _a = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 29
| 1
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
    """Feature type for translations with a fixed set of languages per example.

    Bug fix: the obfuscated original collapsed every class attribute into one
    name, so ``self.languages`` (read by both methods) was never defined.
    Attribute names/annotations restored to the upstream ``datasets`` layout.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""Translation""" , init=False , repr=False )

    def __call__( self ):
        """Return the pyarrow storage type: one string field per sorted language."""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def flatten( self ):
        """Flatten into one string ``Value`` feature per sorted language."""
        from .features import Value

        return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __A :
    """Feature type for translations where each example may cover a different
    subset of languages; encodes to parallel ``language``/``translation`` lists.

    Bug fixes vs. the obfuscated original: class attributes collapsed into one
    name, results assigned to throwaway locals (``lang_set``,
    ``translation_tuples``, ``languages``/``translations`` were all undefined),
    an undefined ``translation_dict`` reference, and three methods sharing one
    name so only the last survived. Names restored per upstream ``datasets``.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""TranslationVariableLanguages""" , init=False , repr=False )

    def __post_init__( self ):
        """Sort/deduplicate the language list and record its size."""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None

    def __call__( self ):
        """Return the pyarrow type: parallel lists of language codes and texts."""
        return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )

    def encode_example( self , translation_dict ):
        """Flatten a ``{lang: text_or_list}`` mapping into sorted parallel lists.

        Raises ValueError when a language falls outside ``self.languages``.
        """
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                F'Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}

    def flatten( self ):
        """Flatten into ``Sequence(Value)`` features for both parallel lists."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('''string''' ) ),
            "translation": Sequence(Value('''string''' ) ),
        }
| 29
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    """Deprecated feature extractor kept as a thin alias of SegformerImageProcessor."""

    def __init__( self , *args , **kwargs ):
        """Emit a deprecation FutureWarning, then initialize the image processor.

        Bug fix: the original passed the ``*args`` tuple as the ``warnings.warn``
        category argument, which raises TypeError on every instantiation; the
        category must be a Warning subclass (upstream uses FutureWarning).
        """
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 29
| 1
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __A ( lowerCAmelCase , unittest.TestCase ):
    """Tokenization tests for RoCBert (vocab + word-shape + pronunciation files).

    Restored from an obfuscated original in which values were bound to
    throwaway names while later statements referenced the real names
    (``vocab_tokens``, ``tokens``, ``text``, ...), class attributes were all
    collapsed into one name, and test method names were mangled so unittest
    could not discover them.
    """

    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        """Write a tiny vocab plus matching shape/pronunciation maps to tmpdir."""
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # ensure_ascii=False keeps the Chinese characters readable in the JSON maps.
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        """Token, shape, and pronunciation ids must all line up with the vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        """Chinese characters are split into individual tokens."""
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        """Lower-casing strips case and (by default) accents."""
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        """Lower-casing with strip_accents=False keeps diacritics."""
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        """Lower-casing with strip_accents=True removes diacritics."""
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        """Default accent stripping when lower-casing matches strip_accents=True."""
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        """Without lower-casing, case is preserved."""
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        """Case and diacritics both preserved."""
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        """Case preserved, diacritics stripped."""
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        """Tokens in never_split are passed through intact."""
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        """Greedy longest-match wordpiece splitting with [UNK] fallback."""
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        """Whitespace predicate covers space, tabs, newlines, and NBSP."""
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        """Control predicate excludes printable characters and whitespace."""
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        """Punctuation predicate covers ASCII punctuation only."""
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        """Soft hyphens are cleaned away before tokenization."""
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

    def test_offsets_with_special_characters(self):
        """Offset mapping from the fast tokenizer must align with the tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        """tokenize_chinese_chars toggles per-character splitting of CJK text."""
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'##{token}' if idx != 0 else token for idx, token in enumerate(text_with_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        """build_inputs_with_special_tokens wraps sequences with CLS/SEP ids."""
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        """prepare_for_model on pre-converted ids must equal encode_plus."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 29
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax all edges leaving ``v`` for one direction of the bidirectional search.

    Updates ``cst_fwd``/``parent``/``queue`` in place and returns the (possibly
    improved) length of the best forward+backward meeting path seen so far.

    Bug fixes: the obfuscated def repeated one parameter name (a SyntaxError),
    assigned the relaxed cost/parent to throwaway locals, and did not carry the
    name ``pass_and_relaxation`` that the caller below uses.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            # Found a cheaper path to nxt in this direction.
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            # The two frontiers meet at nxt: candidate shortest path.
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra: shortest distance from source to destination.

    ``graph_backward`` must be ``graph_forward`` with every edge reversed.
    Returns -1 when destination is unreachable.

    >>> bidirectional_dij("a", "a", {}, {})
    0
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        # Expand one node in each direction; note the swapped roles of the
        # cost maps / visited sets for the backward call.
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # Termination: the frontiers' combined cost cannot improve further.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list for the demo graph: node -> [[neighbor, weight], ...].
_a = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# The same graph with every edge reversed, for the backward search.
# NOTE(review): both dicts are bound to the same throwaway name "_a", so the
# second assignment clobbers the first — upstream these are graph_fwd /
# graph_bwd; confirm before use.
_a = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 29
| 1
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __A ( lowerCAmelCase ):
    """Configuration for M-CLIP: text-transformer width and image-embedding size."""

    lowerCAmelCase_ = """M-CLIP"""

    def __init__( self , transformerDimSize=1_0_2_4 , imageDimSize=7_6_8 , **kwargs ):
        """Store the dimensions read later as ``transformerDimensions``/``numDims``.

        Bug fixes: the obfuscated signature repeated one parameter name (a
        SyntaxError) and bound the values to throwaway locals, so the
        attributes consumed by the model's Linear layer were never set.
        """
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class __A ( lowerCAmelCase ):
    """M-CLIP text encoder: XLM-R backbone plus a linear projection into the
    image embedding space."""

    lowerCAmelCase_ = MCLIPConfig

    def __init__( self , config , *args , **kwargs ):
        """Build the transformer and projection layer from ``config``.

        Bug fix: both modules were previously bound to throwaway locals, so
        ``self.transformer`` / ``self.LinearTransformation`` (used in the
        forward pass) were never defined.
        """
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def __lowerCamelCase ( self , input_ids , attention_mask ):
        """Mask-weighted mean-pool the token embeddings, then project.

        Returns ``(projected_embeddings, token_embeddings)``.
        Bug fixes: duplicate parameter names (a SyntaxError) and the pooled
        result was bound to a throwaway local while ``embs`` was referenced.
        """
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        # Masked mean over the sequence dimension (padding contributes zero).
        pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(pooled ), embs
| 29
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
    """Processor bundling a CLAP feature extractor with a Roberta tokenizer."""

    lowerCAmelCase_ = """ClapFeatureExtractor"""
    lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

    def __init__( self , feature_extractor , tokenizer ):
        """Register both components on the processor mixin.

        Bug fix: the obfuscated signature repeated one parameter name, which
        is a SyntaxError.
        """
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Bug fixes: duplicate parameter names, and intermediate results were
        bound to throwaway locals so the later ``encoding`` /
        ``audio_features`` references were undefined.
        """
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            # Merge the audio features into the text encoding.
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    # NOTE(review): the next two methods share one obfuscated name, so the
    # second definition shadows the first (upstream these are batch_decode /
    # decode) — confirm against the upstream ClapProcessor before renaming.
    def __lowerCamelCase ( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def __lowerCamelCase ( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def __lowerCamelCase ( self ):
        """Deduplicated union of tokenizer and feature-extractor input names.

        Bug fix: both name lists were bound to throwaway locals while the
        return statement referenced the real names.
        """
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29
| 1
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect the listed partitions of ``df`` and return the expected
    ``(row_id, row_dict)`` pairs in partition order.

    Bug fixes: the obfuscated def used a throwaway name with duplicate
    parameters (a SyntaxError) while every call site below uses this name,
    and the collected partition was bound to a throwaway local.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    """Repartitioning by max_shard_size=16 should split 100 int64 rows into 50 shards.

    Bug fix: the obfuscated version bound every object to a throwaway local,
    leaving ``spark`` / ``spark_builder`` undefined where referenced, and all
    six tests in this module shared one function name.
    """
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    """Examples generated in a custom partition order match the expected rows.

    Bug fix: restored the real local names (``spark``, ``df``,
    ``generate_fn``, ...) that later statements referenced, and a unique
    test function name.
    """
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    """A single-partition DataFrame yields one shard with sequential row ids.

    Bug fix: restored the real local names (``spark``, ``df``, ``it``) that
    later statements referenced, and a unique test function name.
    """
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == F'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    """Shuffling data sources with a reversed generator visits partitions [2, 1, 0].

    Bug fixes: the mocked shuffle lambda named its parameter differently from
    the body (undefined ``x``), and every local was a throwaway name.
    """
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """Sharding across two workers yields partitions [0, 2] and [1, 3].

    Bug fix: restored distinct local names for the two shard iterables and
    their expectations; the obfuscated version referenced undefined names.
    """
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    """Even with max_shard_size=1, partitions never exceed the row count.

    Bug fix: restored the real local names (``spark``, ``df``,
    ``spark_builder``) and a unique test function name.
    """
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 29
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    """Builds tiny DPR configs/inputs and runs shape checks on the three TF-DPR heads.

    Restored from the obfuscated original: the __init__ signature repeated one
    parameter name (a SyntaxError), values were bound to throwaway locals
    instead of instance attributes, the class name did not match the
    ``TFDPRModelTester`` reference in the test class below, and all methods
    shared one name while the test class calls ``prepare_config_and_inputs`` /
    ``create_and_check_dpr_*``.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Create a tiny Bert-backed DPRConfig plus random ids/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        base_config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **base_config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Pooler output shape check for the context encoder, with/without masks."""
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Pooler output shape check for the question encoder, with/without masks."""
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Start/end/relevance logits shape checks for the reader."""
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        """Reduce the full fixture tuple to the (config, inputs_dict) pair the
        common model tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Model-level tests for the three TF-DPR heads.

    Restored from the obfuscated original: class attributes were collapsed
    into one name (so the mixin flags were lost) and method names were mangled
    so unittest could not discover them; names follow the upstream TF-DPR test.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
    # Flags consumed by the common TF model test mixin.
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        """Attach the model tester and a ConfigTester for DPRConfig."""
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=3_7)

    def test_config(self):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """Each public checkpoint must load without error."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class __A ( unittest.TestCase ):
    """Integration test against the pretrained TF DPR question encoder.

    Locals restored (``model``/``output``/``expected_slice`` were undefined)
    and the method renamed ``test_embeddings`` so unittest collects it.
    """

    @slow
    def test_embeddings(self):
        model = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        input_ids = tf.constant(
            [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.0323_6253,
                    0.1275_3335,
                    0.1681_8509,
                    0.0027_9786,
                    0.389_6933,
                    0.2426_4945,
                    0.217_8971,
                    -0.0233_5227,
                    -0.0848_1959,
                    -0.1432_4117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
| 1
|
from __future__ import annotations
def lowerCAmelCase__(__snake_case ) -> bool:
    """Return True if the decimal digits of the number read the same reversed.

    The original bound the string to an obfuscated name and then returned the
    undefined ``n``; the local is now named consistently.
    """
    digits = str(__snake_case )
    return digits == digits[::-1]
def lowerCAmelCase__(__snake_case = 1000000 ) -> int:
    """Project Euler 36: sum of numbers below the limit that are palindromic
    in both base 10 and base 2.

    The palindrome checks are inlined because the original called the
    undefined name ``is_palindrome``; the return annotation is corrected
    (an ``int`` total is always returned).
    """
    total = 0
    for i in range(1 ,__snake_case ):
        decimal = str(i)
        binary = bin(i)[2:]  # strip the '0b' prefix
        if decimal == decimal[::-1] and binary == binary[::-1]:
            total += i
    return total
if __name__ == "__main__":
    # ``lowerCAmelCase__`` is the most recent binding at this point in the
    # module, i.e. the Euler-36 solver above; the original called the
    # undefined name ``solution``.
    print(lowerCAmelCase__(int(str(input().strip()))))
| 29
|
import string
from math import logaa
def lowerCAmelCase__(term ,document ) -> int:
    """Count case-insensitive whole-word occurrences of *term* in *document*.

    Punctuation and newlines are stripped before splitting on spaces.  The
    parameters were both obfuscated to the same name (a SyntaxError); they
    are restored to the names the body uses.
    """
    document_without_punctuation = document.translate(
        str.maketrans('''''' ,'''''' ,string.punctuation ) ).replace('''\n''' ,'''''' )
    tokenize_document = document_without_punctuation.split(''' ''' )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCAmelCase__(term ,corpus ) -> tuple[int, int]:
    """Return ``(docs containing term, total docs)`` for a newline-separated corpus.

    Matching is case-insensitive and by substring, after stripping punctuation.
    Duplicate obfuscated parameter names (a SyntaxError) are restored.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('''''' ,'''''' ,string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('''\n''' )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def lowerCAmelCase__(df ,n ,smoothing=False ) -> float:
    """Inverse document frequency, rounded to 3 places.

    :param df: document frequency of the term
    :param n: total number of documents
    :param smoothing: use the smoothed variant ``1 + log10(n / (1 + df))``
    :raises ZeroDivisionError: if ``df == 0`` without smoothing
    :raises ValueError: if ``n == 0`` (log10(0) undefined)

    The module-level ``from math import logaa`` is broken (no such name in
    ``math``), so ``log10`` is imported locally here.
    """
    from math import log10

    if smoothing:
        if n == 0:
            raise ValueError('''log10(0) is undefined.''' )
        return round(1 + log10(n / (1 + df) ) ,3 )
    if df == 0:
        raise ZeroDivisionError('''df must be > 0''' )
    elif n == 0:
        raise ValueError('''log10(0) is undefined.''' )
    return round(log10(n / df ) ,3 )
def lowerCAmelCase__(tf ,idf ) -> float:
    """Return the tf-idf score ``tf * idf`` rounded to 3 places.

    Duplicate obfuscated parameter names (a SyntaxError) are restored.
    """
    return round(tf * idf ,3 )
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the Graphormer model package.  The obfuscation
# had collapsed ``_import_structure`` to ``_a`` (overwriting the dict with a
# list) and referenced the undefined ``_import_structure`` below; the
# canonical transformers pattern is restored.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

# Register the torch-only modelling symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Funnel model package.  The obfuscation had
# collapsed ``_import_structure`` to ``_a`` (each optional section overwrote
# the dict with a list) and referenced the undefined ``_import_structure``
# below; the canonical transformers pattern is restored.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 1
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( SchedulerCommonTest ):
    """Unit tests for ``DDPMScheduler``.

    Restored from obfuscation: the base class (was the undefined name
    ``lowerCAmelCase``; ``SchedulerCommonTest`` is the import above and
    provides ``check_over_configs``/``check_over_forward``/``dummy_*``), the
    ``scheduler_classes`` attribute and ``get_scheduler_config`` names the
    bodies call on ``self``, the ``test_*`` method names (each def previously
    shadowed the last), and all undefined locals.
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default DDPM scheduler kwargs, overridable per test."""
        config = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0_0979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.02)) < 1E-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1E-2
        assert abs(result_mean.item() - 0.3372) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1E-2
        assert abs(result_mean.item() - 0.2631) < 1E-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError, msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 29
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# Lightweight stand-in for the argparse namespace that ``TestCommand``
# expects.  The last eight fields default as listed (``dataset`` has no
# default and must always be supplied).
_a = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source ,target ) -> bool:
    """Return True when *source* is within 1% of *target* (relative to target).

    Renamed to match the ``is_apercent_close`` call sites in the test below;
    the duplicate obfuscated parameter names were a SyntaxError, and the old
    ``List[str]`` annotation referenced an unimported name.
    """
    return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    """Run the `datasets-cli test` command on a fixture dataset dir and verify
    the generated README dataset infos.

    Restored from obfuscation: the ``test_`` name (pytest never collected the
    old one), locals, the unimported ``Tuple`` annotation, and the final
    no-op ``result == expected`` comparison (now asserted).
    """
    args = _TestCommandArgs(dataset=dataset_loading_script_dir ,all_configs=True ,save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir ,'''README.md''' )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
        {
            '''default''': DatasetInfo(
                features=Features(
                    {
                        '''tokens''': Sequence(Value('''string''' ) ),
                        '''ner_tags''': Sequence(
                            ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
                        '''langs''': Sequence(Value('''string''' ) ),
                        '''spans''': Sequence(Value('''string''' ) ),
                    } ) ,splits=[
                    {
                        '''name''': '''train''',
                        '''num_bytes''': 2351563,
                        '''num_examples''': 10000,
                    },
                    {
                        '''name''': '''validation''',
                        '''num_bytes''': 238418,
                        '''num_examples''': 1000,
                    },
                ] ,download_size=3940680 ,dataset_size=2589981 ,)
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['''default'''] ,key ), getattr(expected_dataset_infos['''default'''] ,key )
        if key == "num_bytes":
            # byte counts can drift slightly between runs; allow 1% tolerance
            assert is_apercent_close(result ,expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
        else:
            assert result == expected
| 29
| 1
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
# Deprecation shim: this script only re-exports the pipeline from diffusers
# (import above) and warns users to import it from diffusers directly.
warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 29
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """Builds a tiny ``EsmConfig`` plus random inputs and runs shape checks
    for the TF ESM models.

    Restored from obfuscation: the class name (the test class below
    instantiates ``TFEsmModelTester(self)``), the ``self.*`` attribute
    assignments in ``__init__`` (previously bound to throwaway locals, so
    every later ``self.batch_size`` etc. raised AttributeError), the method
    names the mixin/test class call, and the broken tuple unpacks.
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above plus encoder states/mask, with the config flipped to decoder mode."""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        # Exercise dict, list and bare-tensor calling conventions.
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` for the shared model tests."""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline tests for the TF ESM models.

    Restored from obfuscation: the mixin bases (were the undefined name
    ``lowerCAmelCase``; both mixins are imported at the top of the file), the
    mixin attribute names, and the ``setUp``/``test_*`` method names that
    unittest/pytest actually discover.
    """

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class __A ( unittest.TestCase ):
    """Integration tests against the pretrained ``facebook/esm2_t6_8M_UR50D``
    checkpoint.

    Locals restored (``model``/``output``/``expected_slice`` were undefined)
    and the two methods given distinct ``test_*`` names — the obfuscated defs
    shadowed each other, so only one ever existed.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 3_3]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
| 1
|
import math
def is_prime(number ) -> bool:
    """Trial-division primality test for non-negative integers.

    Renamed to match the ``is_prime`` call below; the body previously used
    the undefined name ``number`` and asserted ``isinstance(x, x)``.
    """
    assert isinstance(number ,int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 ,int(math.sqrt(number ) + 1 ) ,2 )
    return not any(not number % i for i in odd_numbers )
def lowerCAmelCase__(number ,factor=1 ,**kwargs ) -> int:
    """Return the first prime reached from ``factor * number``, scanning
    upward (or downward when ``desc=True`` is passed).

    The original declared three parameters with the same obfuscated name
    (a SyntaxError) and called the undefined sibling ``is_prime``; a local
    primality check makes the function self-contained.
    """

    def _is_prime(candidate ):
        # Local trial-division check (mirrors the module's is_prime logic).
        if candidate < 2:
            return False
        if candidate < 4:
            return True
        if candidate % 2 == 0:
            return False
        return all(candidate % i for i in range(3 ,int(math.sqrt(candidate ) ) + 1 ,2 ) )

    value = factor * number
    first_value_val = value
    descending = "desc" in kwargs and kwargs["desc"] is True
    while not _is_prime(value ):
        value += -1 if descending else 1
        if value == first_value_val:
            # Wrapped back to the start: restart the search one higher.
            return lowerCAmelCase__(value + 1 ,**kwargs )
    return value
| 29
|
from math import sqrt
def is_prime(number ) -> bool:
    """Return True iff *number* is prime (trial division up to sqrt).

    Renamed to match the ``is_prime`` calls later in the file; the body
    previously used the undefined names ``number`` and ``status``.
    """
    assert isinstance(number ,int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 ,int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status ,bool ), "'status' must been from type bool"
    return status
def lowerCAmelCase__(n ) -> list[int]:
    """Sieve of Eratosthenes: return all primes from 2 up to *n* inclusive.

    The body previously used the undefined names ``n``/``begin_list``/``ans``
    and the ``-> Any`` annotation referenced an unimported name.
    """
    assert isinstance(n ,int ) and (n > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2 ,n + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 ,len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans ,list ), "'ans' must been from type list"
    return ans
def get_prime_numbers(n ) -> list[int]:
    """Return every prime in ``[2, n]``.

    Renamed to match the ``get_prime_numbers`` call below; a local primality
    check replaces the undefined sibling ``is_prime`` so the function is
    self-contained.
    """
    assert isinstance(n ,int ) and (n > 2), "'N' must been an int and > 2"

    def _is_prime(candidate ):
        # trial division up to sqrt(candidate)
        return candidate >= 2 and all(candidate % d for d in range(2 ,int(round(sqrt(candidate ) ) ) + 1 ) )

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 ,n + 1 ):
        if _is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans ,list ), "'ans' must been from type list"
    return ans
def prime_factorization(number ) -> list[int]:
    """Return the prime factorization of *number* (0 -> [0], 1 -> [1]).

    Renamed to match the ``prime_factorization`` calls below.  A local
    primality check replaces the undefined sibling ``is_prime``, and integer
    division keeps the quotient an int (the original ``/=`` produced floats).
    """
    assert isinstance(number ,int ) and number >= 0, "'number' must been an int and >= 0"

    def _is_prime(candidate ):
        return candidate >= 2 and all(candidate % d for d in range(2 ,int(round(sqrt(candidate ) ) ) + 1 ) )

    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not _is_prime(number ):
        while quotient != 1:
            if _is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans ,list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase__(number ) -> int:
    """Return the largest prime factor of *number* (0 -> 0, 1 -> 1).

    The factorization is computed inline because the original called the
    undefined sibling ``prime_factorization``.
    """
    assert isinstance(number ,int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    if number <= 1:
        prime_factors = [number]
    else:
        prime_factors = []
        quotient = number
        factor = 2
        while quotient != 1:
            if quotient % factor == 0:
                prime_factors.append(factor )
                quotient //= factor
            else:
                factor += 1
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans ,int ), "'ans' must been from type int"
    return ans
def lowerCAmelCase__(number ) -> int:
    """Return the smallest prime factor of *number* (0 -> 0, 1 -> 1).

    The factorization is computed inline because the original called the
    undefined sibling ``prime_factorization``.
    """
    assert isinstance(number ,int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    if number <= 1:
        prime_factors = [number]
    else:
        prime_factors = []
        quotient = number
        factor = 2
        while quotient != 1:
            if quotient % factor == 0:
                prime_factors.append(factor )
                quotient //= factor
            else:
                factor += 1
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans ,int ), "'ans' must been from type int"
    return ans
def is_even(number ) -> bool:
    """Return True iff *number* is even.

    Renamed to match the ``is_even`` call in the Goldbach function below;
    the second assert previously did ``isinstance(x, x)``.
    """
    assert isinstance(number ,int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 ,bool ), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number ) -> bool:
    """Return True iff *number* is odd (named to mirror ``is_even`` above).

    The second assert previously did ``isinstance(x, x)`` and the annotation
    referenced the unimported ``List``.
    """
    assert isinstance(number ,int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 ,bool ), "compare bust been from type bool"
    return number % 2 != 0
def lowerCAmelCase__(__snake_case ) -> List[Any]:
    """Goldbach: return two primes whose sum equals the given even number > 2.

    NOTE(review): this file's identifiers were mangled — the body reads names
    (`number`, `prime_numbers`, `len_pn`, `i`, `j`, `loop`, `ans`) that the
    `lowerCamelCase__ = ...` assignments no longer bind, and the helpers
    `get_prime_numbers` / `is_prime` / `is_even` are not defined under those
    names in this file. Restore the original bindings before relying on it.
    """
    assert (
        isinstance(__snake_case ,__snake_case ) and (number > 2) and is_even(__snake_case )
    ), "'number' must been an int, even and > 2"
    lowerCamelCase__ = [] # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    lowerCamelCase__ = get_prime_numbers(__snake_case )
    lowerCamelCase__ = len(__snake_case )
    # run variable for while-loops.
    lowerCamelCase__ = 0
    lowerCamelCase__ = None
    # exit variable. for break up the loops
    lowerCamelCase__ = True
    # scan ordered pairs (i, j) with i < j until one sums to the target
    while i < len_pn and loop:
        lowerCamelCase__ = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                lowerCamelCase__ = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(__snake_case ,__snake_case )
        and (len(__snake_case ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowerCAmelCase__(number_a, number_b) -> int:
    """Return the greatest common divisor of two non-negative ints (Euclid).

    BUG FIX: the scrambled original declared both parameters as
    ``__snake_case`` (a SyntaxError) and collapsed ``number1``/``number2``
    into one name, so the loop always returned 0. Restored Euclid's
    algorithm with distinct names.
    """
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    # Euclid: replace (a, b) by (b, a mod b) until b is zero.
    while number_b != 0:
        number_a, number_b = number_b, number_a % number_b
    # precondition
    assert isinstance(number_a, int) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Any:
    """Least common multiple (kgV) of two positive ints via prime factorization.

    NOTE(review): the scrambling left both parameters named ``__snake_case``
    (a SyntaxError), collapsed ``number1``/``number2`` into ``numbera`` and
    ``prime_fac_1``/``prime_fac_2`` into ``prime_fac_a``, and the body reads
    `ans`/`done`/`n` that the `lowerCamelCase__ = ...` assignments no longer
    bind. `prime_factorization` is also not defined under that name here.
    Restore the original bindings before relying on this function.
    """
    assert (
        isinstance(__snake_case ,__snake_case )
        and isinstance(__snake_case ,__snake_case )
        and (numbera >= 1)
        and (numbera >= 1)
    ), "'number1' and 'number2' must been positive integer."
    lowerCamelCase__ = 1 # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numbera > 1:
        # builds the prime factorization of 'number1' and 'number2'
        lowerCamelCase__ = prime_factorization(__snake_case )
        lowerCamelCase__ = prime_factorization(__snake_case )
    elif numbera == 1 or numbera == 1:
        lowerCamelCase__ = []
        lowerCamelCase__ = []
    lowerCamelCase__ = max(__snake_case ,__snake_case )
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    lowerCamelCase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_a:
                lowerCamelCase__ = prime_fac_a.count(__snake_case )
                lowerCamelCase__ = prime_fac_a.count(__snake_case )
                for _ in range(max(__snake_case ,__snake_case ) ):
                    ans *= n
            else:
                lowerCamelCase__ = prime_fac_a.count(__snake_case )
                for _ in range(__snake_case ):
                    ans *= n
            done.append(__snake_case )
    # iterates through primeFac2
    for n in prime_fac_a:
        if n not in done:
            lowerCamelCase__ = prime_fac_a.count(__snake_case )
            for _ in range(__snake_case ):
                ans *= n
            done.append(__snake_case )
    # precondition
    assert isinstance(__snake_case ,__snake_case ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowerCAmelCase__(n) -> int:
    """Return the n-th prime number, counting from get_prime(0) == 2.

    Relies on the module-level ``is_prime`` helper; ``n`` must be a
    non-negative integer.
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict:
    """Return the primes strictly between two given primes p1 < p2.

    NOTE(review): identifiers were mangled — both parameters are named
    ``__snake_case`` (a SyntaxError), ``p_number_1``/``p_number_2`` collapsed
    into ``p_number_a``, and the body reads `number`/`ans` that the
    `lowerCamelCase__ = ...` assignments no longer bind; `is_prime` is also
    not defined under that name here. Restore before relying on it.
    """
    assert (
        is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    lowerCamelCase__ = p_number_a + 1 # jump to the next number
    lowerCamelCase__ = [] # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(__snake_case ):
        number += 1
    while number < p_number_a:
        ans.append(__snake_case )
        number += 1
        # fetch the next prime number.
        while not is_prime(__snake_case ):
            number += 1
    # precondition
    assert (
        isinstance(__snake_case ,__snake_case )
        and ans[0] != p_number_a
        and ans[len(__snake_case ) - 1] != p_number_a
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCAmelCase__(n) -> list:
    """Return all positive divisors of *n* (including 1 and n), ascending.

    BUG FIX: the scrambled original read ``n`` and ``ans`` while binding
    neither (param was ``__snake_case``, the list was assigned to
    ``lowerCamelCase__``); both names are restored here.
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    # trial-divide every candidate up to n itself
    ans = [divisor for divisor in range(1, n + 1) if n % divisor == 0]
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def lowerCAmelCase__(number) -> bool:
    """Return True when *number* equals the sum of its proper divisors (6, 28, ...).

    Relies on the module-level ``get_divisors`` helper; ``number`` must be an
    int greater than 1.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def lowerCAmelCase__(numerator, denominator) -> tuple:
    """Reduce a fraction by the gcd of its parts; returns (numerator, denominator).

    BUG FIX: the scrambled original declared both parameters as
    ``__snake_case`` (a SyntaxError) and never bound ``gcd_of_fraction``.
    Relies on the module-level ``gcd`` helper.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase__(n) -> int:
    """Return n! for n >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def lowerCAmelCase__(n) -> int:
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1.

    BUG FIX: the scrambled original read ``fiba``/``tmp``/``ans`` without
    binding them; restored as a two-variable iterative recurrence.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    previous = 0
    ans = 1  # this will be return
    for _ in range(n - 1):
        # advance the pair (previous, ans) -> (ans, ans + previous)
        previous, ans = ans, ans + previous
    return ans
| 29
| 1
|
def lowerCAmelCase__(input_a, input_b) -> int:
    """Logical AND gate: 1 only when neither 0/1 input is 0.

    BUG FIX: the scrambled original declared both parameters as
    ``__snake_case`` (a SyntaxError) and inspected the first input twice,
    ignoring the second.
    """
    return int((input_a, input_b).count(0) == 0)
def lowerCAmelCase__() -> None:
    """Exercise the AND gate over its full two-input truth table.

    NOTE(review): `and_gate` is not bound under that name in this file — the
    gate definition above was renamed to ``lowerCAmelCase__``; restore the
    original name before running.
    """
    assert and_gate(0 ,0 ) == 0
    assert and_gate(0 ,1 ) == 0
    assert and_gate(1 ,0 ) == 0
    assert and_gate(1 ,1 ) == 1
# Script entry point: run the truth-table self-test, then print each case.
# NOTE(review): `test_and_gate` / `and_gate` are not bound under these names
# here — the defs above were renamed to `lowerCAmelCase__`.
if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 29
|
from __future__ import annotations
def lowerCAmelCase__(sequence, start=None, end=None) -> None:
    """Sort *sequence* in place between *start* and *end* using slowsort.

    Deliberately inefficient ("multiply and surrender") — for demonstration
    only. BUG FIX: the scrambled original declared all three parameters as
    ``__snake_case`` (a SyntaxError) and recursed via the unbound name
    ``slowsort``; recursion now goes through the function's actual name.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # sort each half, move the maximum to the end, then sort the rest
    lowerCAmelCase__(sequence, start, mid)
    lowerCAmelCase__(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    lowerCAmelCase__(sequence, start, end - 1)
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 29
| 1
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
# Module-level logger (name mangled to `_a`).
_a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __A ( lowerCAmelCase ):
    """Training arguments for sequence-to-sequence models.

    Extends the base ``TrainingArguments`` (aliased here as ``lowerCAmelCase``)
    with generation-related evaluation options. Field names were mangled to
    ``lowerCAmelCase_`` and their annotations stripped; the ``metadata['help']``
    strings describe each field's intent.
    """
    # sortish_sampler: whether to use SortishSampler during training.
    lowerCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Whether to use SortishSampler or not."""} )
    # predict_with_generate: run generate() for generative metrics at eval.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    # generation_max_length: max_length used on each evaluation loop.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={
            """help""": (
                """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `max_length` value of the model configuration."""
            )
        } , )
    # generation_num_beams: num_beams used on each evaluation loop.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={
            """help""": (
                """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `num_beams` value of the model configuration."""
            )
        } , )
    # generation_config: model id, path or URL of a GenerationConfig JSON file.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={
            """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
        } , )
    def __lowerCamelCase ( self ):
        """Serialize to a dict, expanding nested GenerationConfig-like values.

        NOTE(review): identifiers were mangled — the loop reads `d` and `v`,
        which the `lowerCamelCase__ = ...` assignments no longer bind.
        """
        lowerCamelCase__ = super().to_dict()
        for k, v in d.items():
            if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                lowerCamelCase__ = v.to_dict()
        return d
| 29
|
from __future__ import annotations
def lowerCAmelCase__(principal, daily_interest_rate, days_between_payments) -> float:
    """Return the simple interest: principal * rate * days.

    BUG FIX: the scrambled original declared all three parameters as
    ``__snake_case`` (a SyntaxError); parameter names are restored to the
    ones the body actually reads.

    Raises:
        ValueError: if days/principal are non-positive or the rate is negative.
    """
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments
def lowerCAmelCase__(
    principal, nominal_annual_interest_rate_percentage, number_of_compounding_periods
) -> float:
    """Return the compound interest earned: P * ((1 + r)^n - 1).

    BUG FIX: the scrambled original declared all three parameters as
    ``__snake_case`` (a SyntaxError); names restored to the ones the body
    actually reads.

    Raises:
        ValueError: if periods/principal are non-positive or the rate is negative.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def lowerCAmelCase__(
    principal, nominal_annual_percentage_rate, number_of_years
) -> float:
    """Return APR interest compounded daily over the given number of years.

    Delegates to the module-level ``compound_interest`` helper with a daily
    rate (APR / 365) and daily periods (years * 365). BUG FIX: the scrambled
    original declared all three parameters as ``__snake_case`` (SyntaxError).

    Raises:
        ValueError: if years/principal are non-positive or the rate is negative.
    """
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 29
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_a = logging.get_logger(__name__)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case=None ,__snake_case=None ) -> List[str]:
    """Set a (possibly bitsandbytes-quantized) parameter or buffer on a device.

    Resolves a dotted ``tensor_name`` inside ``module``, then either moves the
    existing value or replaces it, wrapping it in the matching bitsandbytes
    parameter class when the slot is already quantized.

    NOTE(review): identifiers were mangled — the body reads names such as
    `tensor_name`, `module`, `splits`, `new_module`, `old_value`, `is_buffer`,
    `param`, `value`, `new_value`, `device`, `fpaa_statistics` that the
    `lowerCamelCase__ = ...` assignments no longer bind, and the distinct
    4-bit/8-bit flags both collapsed to `is_abit` (likewise `Paramsabit` /
    `IntaParams` / `torch.inta`). Treat as non-runnable as written.
    """
    # Walk the dotted path "a.b.c" down to the submodule owning the tensor.
    if "." in tensor_name:
        lowerCamelCase__ = tensor_name.split('''.''' )
        for split in splits[:-1]:
            lowerCamelCase__ = getattr(__snake_case ,__snake_case )
            if new_module is None:
                raise ValueError(F'{module} has no attribute {split}.' )
            lowerCamelCase__ = new_module
        lowerCamelCase__ = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
    lowerCamelCase__ = tensor_name in module._buffers
    lowerCamelCase__ = getattr(__snake_case ,__snake_case )
    if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
        raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    if is_buffer or not is_bitsandbytes_available():
        lowerCamelCase__ = False
        lowerCamelCase__ = False
    else:
        # Detect whether the existing parameter is a bnb 4-bit or 8-bit param.
        lowerCamelCase__ = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
        lowerCamelCase__ = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
    if is_abit or is_abit:
        lowerCamelCase__ = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                lowerCamelCase__ = old_value.to(__snake_case )
            elif isinstance(__snake_case ,torch.Tensor ):
                lowerCamelCase__ = value.to('''cpu''' )
                if value.dtype == torch.inta:
                    # int8 serialization requires a recent-enough bitsandbytes.
                    lowerCamelCase__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_abit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
            else:
                lowerCamelCase__ = torch.tensor(__snake_case ,device='''cpu''' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls ,__snake_case ) and fpaa_statistics is None:
                lowerCamelCase__ = new_value.T
            lowerCamelCase__ = old_value.__dict__
            if is_abit:
                lowerCamelCase__ = bnb.nn.IntaParams(__snake_case ,requires_grad=__snake_case ,**__snake_case ).to(__snake_case )
            elif is_abit:
                lowerCamelCase__ = bnb.nn.Paramsabit(__snake_case ,requires_grad=__snake_case ,**__snake_case ).to(__snake_case )
            lowerCamelCase__ = new_value
            if fpaa_statistics is not None:
                setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(__snake_case ) )
    else:
        if value is None:
            lowerCamelCase__ = old_value.to(__snake_case )
        elif isinstance(__snake_case ,torch.Tensor ):
            lowerCamelCase__ = value.to(__snake_case )
        else:
            lowerCamelCase__ = torch.tensor(__snake_case ,device=__snake_case )
        if is_buffer:
            lowerCamelCase__ = new_value
        else:
            lowerCamelCase__ = nn.Parameter(__snake_case ,requires_grad=old_value.requires_grad )
            lowerCamelCase__ = new_value
def lowerCAmelCase__(__snake_case ,__snake_case=None ,__snake_case=None ,__snake_case=None ,__snake_case=False ) -> Tuple:
    """Recursively swap ``nn.Linear``/Conv1D children for bitsandbytes layers.

    Returns ``(model, has_been_replaced)``; modules named in
    ``modules_to_not_convert`` are skipped.

    NOTE(review): identifiers were mangled — the body reads `model`,
    `current_key_name`, `modules_to_not_convert`, `quantization_config`,
    `has_been_replaced` that the `lowerCamelCase__ = ...` assignments no
    longer bind, and bnb class names collapsed (`LinearabitLt`, `Linearabit`).
    Treat as non-runnable as written.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            lowerCamelCase__ = []
        current_key_name.append(__snake_case )
        if (isinstance(__snake_case ,nn.Linear ) or isinstance(__snake_case ,__snake_case )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(__snake_case ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(__snake_case ,__snake_case ):
                        lowerCamelCase__ , lowerCamelCase__ = module.weight.shape
                    else:
                        lowerCamelCase__ = module.in_features
                        lowerCamelCase__ = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        lowerCamelCase__ = bnb.nn.LinearabitLt(
                            __snake_case ,__snake_case ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
                        lowerCamelCase__ = True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            pass
                        else:
                            lowerCamelCase__ = bnb.nn.Linearabit(
                                __snake_case ,__snake_case ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
                            lowerCamelCase__ = True
                    # Store the module class in case we need to transpose the weight later
                    lowerCamelCase__ = type(__snake_case )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(__snake_case )
        if len(list(module.children() ) ) > 0:
            lowerCamelCase__ , lowerCamelCase__ = _replace_with_bnb_linear(
                __snake_case ,__snake_case ,__snake_case ,__snake_case ,has_been_replaced=__snake_case ,)
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def lowerCAmelCase__(__snake_case ,__snake_case=None ,__snake_case=None ,__snake_case=None ) -> Dict:
    """Public entry point: convert a model's linear layers to bitsandbytes ones.

    Defaults ``modules_to_not_convert`` to ``['lm_head']`` and warns when no
    linear modules were replaced.

    NOTE(review): identifiers were mangled — the body reads
    `modules_to_not_convert`, `model`, `has_been_replaced` that the
    `lowerCamelCase__ = ...` assignments no longer bind, and
    `_replace_with_bnb_linear` is defined above under a mangled name.
    """
    lowerCamelCase__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    lowerCamelCase__ , lowerCamelCase__ = _replace_with_bnb_linear(
        __snake_case ,__snake_case ,__snake_case ,__snake_case )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
def lowerCAmelCase__(*args, **kwargs):
    """Deprecated alias for ``replace_with_bnb_linear`` (kept for back-compat).

    BUG FIX: the scrambled original declared ``*__snake_case, **__snake_case``
    (duplicate argument name — SyntaxError) and passed the args tuple as the
    warning category; the category is restored to ``FutureWarning``.
    """
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,
        FutureWarning ,)
    return replace_with_bnb_linear(*args, **kwargs)
def lowerCAmelCase__(*args, **kwargs):
    """Deprecated alias for ``set_module_quantized_tensor_to_device``.

    BUG FIX: the scrambled original declared ``*__snake_case, **__snake_case``
    (duplicate argument name — SyntaxError) and passed the args tuple as the
    warning category; the category is restored to ``FutureWarning``.
    """
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,
        FutureWarning ,)
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def lowerCAmelCase__(__snake_case ) -> Optional[int]:
    """Return the module-name prefixes that should NOT be quantized.

    Ties the weights of a deep copy of ``model`` to find tied parameters and
    keeps the output head (last child module) plus tied weights unconverted;
    strips ``.weight`` / ``.bias`` suffixes from the collected names.

    NOTE(review): identifiers were mangled — the body reads `tied_model`,
    `tied_params`, `has_tied_params`, `is_base_model`, `list_modules`,
    `list_last_module`, `list_untouched`, `names_to_remove`,
    `filtered_module_names`, `name` that the `lowerCamelCase__ = ...`
    assignments no longer bind. Treat as non-runnable as written.
    """
    lowerCamelCase__ = deepcopy(__snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    lowerCamelCase__ = find_tied_parameters(__snake_case )
    # For compatibility with Accelerate < 0.18
    if isinstance(__snake_case ,__snake_case ):
        lowerCamelCase__ = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
    else:
        lowerCamelCase__ = sum(__snake_case ,[] )
    lowerCamelCase__ = len(__snake_case ) > 0
    # Check if it is a base model
    lowerCamelCase__ = not hasattr(__snake_case ,model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    lowerCamelCase__ = list(model.named_children() )
    lowerCamelCase__ = [list_modules[-1][0]]
    # add last module together with tied weights
    lowerCamelCase__ = set(__snake_case ) - set(__snake_case )
    lowerCamelCase__ = list(set(__snake_case ) ) + list(__snake_case )
    # remove ".weight" from the keys
    lowerCamelCase__ = ['''.weight''', '''.bias''']
    lowerCamelCase__ = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                lowerCamelCase__ = name.replace(__snake_case ,'''''' )
        filtered_module_names.append(__snake_case )
    return filtered_module_names
| 29
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCAmelCase__(func):
    """Decorator: make *func* return its own wall-clock runtime in seconds.

    BUG FIX: the scrambled original declared ``wrapper(*__snake_case,
    **__snake_case)`` (duplicate argument name — SyntaxError), never bound
    ``starttime``/``delta``, and discarded ``func.__name__`` instead of
    copying it onto the wrapper.
    """
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)  # result discarded; only the elapsed time matters
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def lowerCAmelCase__(__snake_case ,__snake_case=100 ,__snake_case=None ) -> Optional[Any]:
    """Generate ``num_examples`` random records matching a datasets Features spec.

    Produces random arrays for ``_ArrayXD`` features, a fixed sentence or a
    random int for ``Value`` features, and random sequences (shaped via
    ``seq_shapes``) for ``Sequence`` features.

    NOTE(review): identifiers were mangled — the body reads `features`,
    `dummy_data`, `example`, `v`, `data`, `seq_shapes` that the
    `lowerCamelCase__ = ...` assignments no longer bind. Treat as
    non-runnable as written.
    """
    lowerCamelCase__ = []
    lowerCamelCase__ = seq_shapes or {}
    for i in range(__snake_case ):
        lowerCamelCase__ = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(__snake_case ,_ArrayXD ):
                lowerCamelCase__ = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(__snake_case ,datasets.Value ):
                if v.dtype == "string":
                    lowerCamelCase__ = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    lowerCamelCase__ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
            elif isinstance(__snake_case ,datasets.Sequence ):
                # unwrap nested Sequence features down to the leaf feature
                while isinstance(__snake_case ,datasets.Sequence ):
                    lowerCamelCase__ = v.feature
                lowerCamelCase__ = seq_shapes[k]
                lowerCamelCase__ = np.random.rand(*__snake_case ).astype(v.dtype )
            lowerCamelCase__ = data
        dummy_data.append((i, example) )
    return dummy_data
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=100 ,__snake_case=None ) -> str:
    """Write generated dummy examples to an Arrow file and load them as a Dataset.

    NOTE(review): identifiers were mangled — the body reads `dummy_data`,
    `features`, `num_final_examples`, `dataset` that the
    `lowerCamelCase__ = ...` assignments no longer bind, and
    `generate_examples` is defined above under a mangled name.
    """
    lowerCamelCase__ = generate_examples(__snake_case ,num_examples=__snake_case ,seq_shapes=__snake_case )
    with ArrowWriter(features=__snake_case ,path=__snake_case ) as writer:
        for key, record in dummy_data:
            lowerCamelCase__ = features.encode_example(__snake_case )
            writer.write(__snake_case )
        lowerCamelCase__ , lowerCamelCase__ = writer.finalize()
    # Sanity check: everything we generated must have been written.
    if not num_final_examples == num_examples:
        raise ValueError(
            F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    lowerCamelCase__ = datasets.Dataset.from_file(filename=__snake_case ,info=datasets.DatasetInfo(features=__snake_case ) )
    return dataset
| 29
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
|
def lowerCAmelCase__(grid) -> int:
    """Return the minimum path sum from top-left to bottom-right of *grid*.

    Only rightward and downward moves are allowed; *grid* is mutated row by
    row. Relies on the module-level ``fill_row`` helper. BUG FIX: the
    scrambled original never bound ``row_above``/``current_row`` and dropped
    the result of ``fill_row``.

    Raises:
        TypeError: if the grid or its first row is empty.
    """
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    # First row: only rightward moves are possible, so take prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def lowerCAmelCase__(current_row, row_above) -> list:
    """Accumulate minimal path costs for one grid row given the row above.

    BUG FIX: the scrambled original declared both parameters as
    ``__snake_case`` (a SyntaxError) while the body reads ``current_row`` and
    ``row_above``; the real names are restored.
    """
    # First cell can only come from directly above.
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        # Each cell comes from the cheaper of its left or upper neighbour.
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 29
| 1
|
import math
from collections.abc import Iterator
from itertools import takewhile
def lowerCAmelCase__(number) -> bool:
    """Primality test by trial division over 6k +/- 1 candidates up to sqrt(number).

    BUG FIX: the scrambled original named its parameter ``__snake_case``
    while the body reads ``number`` (NameError); the real name is restored.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def lowerCAmelCase__() -> Iterator[int]:
    """Yield the primes 2, 3, 5, ... indefinitely via the module-level is_prime.

    BUG FIX: the scrambled original read the counter ``num`` without ever
    binding it (assignment went to ``lowerCamelCase__``).
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def lowerCAmelCase__(n=2000000) -> int:
    """Project Euler 10: sum of all primes below *n* (default 2,000,000).

    BUG FIX: the scrambled original wrote ``lambda __snake_case : x < n``,
    renaming the lambda's parameter away from the ``x`` its body reads
    (NameError on first call); the parameter is restored to ``x``.
    Relies on the module-level ``prime_generator`` helper.
    """
    return sum(takewhile(lambda x: x < n, prime_generator()))
# Print the Project Euler result when run as a script.
# NOTE(review): `solution` is not bound under that name here — the def above
# was renamed to `lowerCAmelCase__`.
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 29
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
    """Arguments for GLUE data preparation (task name, data dir, max length, cache).

    Field names were mangled to ``lowerCAmelCase_`` and their annotations
    stripped; the ``metadata['help']`` strings describe each field's intent.
    """
    # task_name: which GLUE task to train on.
    lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    # data_dir: directory containing the task's .tsv (or other) data files.
    lowerCAmelCase_ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    # max_seq_length: hard cap on tokenized sequence length (pad/truncate).
    lowerCAmelCase_ = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # overwrite_cache: ignore cached feature files and rebuild them.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    def __lowerCamelCase ( self ):
        """Post-init hook: normalize the task name to lowercase.

        NOTE(review): the result is assigned to a mangled name; originally
        this wrote back to ``self.task_name``.
        """
        lowerCamelCase__ = self.task_name.lower()
class __A ( lowerCAmelCase ):
    """Dataset split identifiers used by the GLUE dataset below (train/dev/test)."""
    # training split
    lowerCAmelCase_ = """train"""
    # validation split
    lowerCAmelCase_ = """dev"""
    # test split
    lowerCAmelCase_ = """test"""
class __A ( lowerCAmelCase ):
    """Torch Dataset for GLUE tasks, with on-disk feature caching.

    Deprecated in favour of the 🤗 Datasets library (see the warning emitted
    in ``__init__``). Builds features from raw examples on first use and
    caches them under a FileLock so only one distributed worker preprocesses.

    NOTE(review): identifiers were mangled — the three ``lowerCAmelCase_ = 42``
    placeholders were typed class attributes, and the body reads names
    (`mode`, `cached_features_file`, `label_list`, `start`, `examples`, `i`)
    that the `lowerCamelCase__ = ...` assignments no longer bind.
    """
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
        """Load (or build and cache) the features for the requested split."""
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
        lowerCamelCase__ = args
        lowerCamelCase__ = glue_processors[args.task_name]()
        lowerCamelCase__ = glue_output_modes[args.task_name]
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            try:
                lowerCamelCase__ = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        # Load data features from cache or dataset file
        lowerCamelCase__ = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        lowerCamelCase__ = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
        lowerCamelCase__ = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCamelCase__ = cached_features_file + '''.lock'''
        with FileLock(__lowerCAmelCase ):
            if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
                lowerCamelCase__ = time.time()
                lowerCamelCase__ = torch.load(__lowerCAmelCase )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
                else:
                    lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    lowerCamelCase__ = examples[:limit_length]
                lowerCamelCase__ = glue_convert_examples_to_features(
                    __lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
                lowerCamelCase__ = time.time()
                torch.save(self.features , __lowerCAmelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
    def __len__( self ):
        """Number of cached feature records."""
        return len(self.features )
    def __getitem__( self , __lowerCAmelCase ):
        """Return the feature at the given index.

        NOTE(review): the body reads ``i`` but the parameter was mangled to
        ``__lowerCAmelCase``.
        """
        return self.features[i]
    def __lowerCamelCase ( self ):
        """Return the label list for the configured task."""
        return self.label_list
| 29
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __A ( unittest.TestCase ):
    """Test helper that holds GLPN image-processor settings and builds its kwargs.

    The constructor parameter names were mangled to ``__lowerCAmelCase``; the
    attribute assignments show the intended names (batch_size, num_channels,
    image_size, min/max_resolution, do_resize, size_divisor, do_rescale).

    NOTE(review): the ``lowerCamelCase__ = ...`` assignments no longer bind
    `self.*` attributes as the original `self.batch_size = batch_size` etc.
    did — the right-hand-side names are also unbound here.
    """
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=1_8 , __lowerCAmelCase=3_0 , __lowerCAmelCase=4_0_0 , __lowerCAmelCase=True , __lowerCAmelCase=3_2 , __lowerCAmelCase=True , ):
        """Record the image-processing configuration used by the tests."""
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = image_size
        lowerCamelCase__ = min_resolution
        lowerCamelCase__ = max_resolution
        lowerCamelCase__ = do_resize
        lowerCamelCase__ = size_divisor
        lowerCamelCase__ = do_rescale
    def __lowerCamelCase ( self ):
        """Return the kwargs dict used to instantiate GLPNImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class __A ( lowerCAmelCase , unittest.TestCase ):
    '''Tests for ``GLPNImageProcessor`` over PIL, NumPy and PyTorch image inputs.

    NOTE(review): local names in this class look mangled — results are bound
    to ``lowerCamelCase__`` but later statements read ``image_inputs``,
    ``image_processing`` and ``encoded_images``; confirm against the original
    test file before executing.
    '''
    lowerCAmelCase_ = GLPNImageProcessor if is_vision_available() else None
    def __lowerCamelCase ( self ):
        '''Create the shared settings tester.'''
        lowerCamelCase__ = GLPNImageProcessingTester(self )
    @property
    def __lowerCamelCase ( self ):
        '''Kwargs dict for instantiating the image processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def __lowerCamelCase ( self ):
        '''Check the processor exposes the expected configuration attributes.'''
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''size_divisor''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''resample''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
    def __lowerCamelCase ( self ):
        '''Intentionally empty placeholder test.'''
        pass
    def __lowerCamelCase ( self ):
        '''Processing PIL images yields dimensions divisible by size_divisor.'''
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def __lowerCamelCase ( self ):
        '''Processing NumPy arrays yields dimensions divisible by size_divisor.'''
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def __lowerCamelCase ( self ):
        '''Processing PyTorch tensors yields dimensions divisible by size_divisor.'''
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 29
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_a = datasets.logging.get_logger(__name__)
_a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ,__snake_case=False ,__snake_case=True ,__snake_case=False ,__snake_case="dummy_doc" ) -> Union[str, Any]:
    '''Extract per-document coreference info (clusters + mention assignments) via coval's reader.

    NOTE(review): the signature repeats the name ``__snake_case`` (a duplicate-
    argument SyntaxError) and locals below are collapsed to a single mangled
    name while later statements read ``key_doc_lines``, ``singletons_num``,
    ``doc_coref_infos`` etc. — the code appears mangled by an automated
    rename; confirm parameter order (key_lines, sys_lines, NP_only,
    remove_nested, keep_singletons, min_span, doc) against the original.
    '''
    lowerCamelCase__ = {doc: key_lines}
    lowerCamelCase__ = {doc: sys_lines}
    lowerCamelCase__ = {}
    # counters for logging: singleton / nested-mention statistics
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,key_doc_lines[doc] ,__snake_case )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case )
    lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,sys_doc_lines[doc] ,__snake_case )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # NOTE(review): parse trees for the system side also come from the
        # *key* lines here — this matches the upstream metric, where gold
        # parses are used for both sides; confirm intentional.
        lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case )
    if remove_nested:
        lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case )
    lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case )
    lowerCamelCase__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            '''files, respectively''' )
    return doc_coref_infos
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str:
    '''Score key vs. system lines with each coval metric; add the averaged CoNLL score.

    NOTE(review): the signature repeats ``__snake_case`` (duplicate-argument
    SyntaxError) and locals are mangled — later statements read ``metrics``,
    ``output_scores``, ``conll``, ``conll_subparts_num``, ``recall``,
    ``precision`` and ``fa`` that are never bound under those names here;
    confirm against the original metric implementation.
    '''
    lowerCamelCase__ = get_coref_infos(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
    lowerCamelCase__ = {}
    lowerCamelCase__ = 0
    lowerCamelCase__ = 0
    for name, metric in metrics:
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = evaluator.evaluate_documents(__snake_case ,__snake_case ,beta=1 )
        # the CoNLL score averages the F1 of exactly these three metrics
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
        logger.info(
            name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,)
    if conll_subparts_num == 3:
        lowerCamelCase__ = (conll / 3) * 100
        logger.info(F'CoNLL score: {conll:.2f}' )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
    '''Return True if the CoNLL key lines carry gold parse annotation.

    Scans the given lines, skipping ``#`` comment lines. The first
    non-comment line with more than 6 columns decides nothing by itself:
    a parse column (index 5) different from "-" means gold parse is
    present; a line with 6 or fewer columns aborts the scan.

    BUG FIX: the original body iterated the undefined name ``key_lines``,
    stored results into a single mangled local, and then read the undefined
    names ``parse_col`` and ``has_gold_parse`` (NameError at runtime);
    consistent local names restored, control flow unchanged.
    '''
    has_gold_parse = False
    for line in __snake_case:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    '''CoVal coreference metric wrapper (MUC, B-cubed, CEAFe, LEA, CoNLL score).'''
    def __lowerCamelCase ( self ):
        '''Declare metric metadata: string-sequence predictions/references in CoNLL format.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' ) ),
                } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ):
        '''Compute all coval scores for the given key/system lines.

        NOTE(review): the parameter list repeats ``__lowerCAmelCase``
        (duplicate-argument SyntaxError) and the body reads ``min_span`` /
        ``has_gold_parse`` / ``score`` that are never bound under those
        names — the code looks mangled by an automated rename; confirm
        against the original (predictions, references, keep_singletons,
        NP_only, min_span, remove_nested).
        '''
        lowerCamelCase__ = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            # min_span needs gold parse trees in the references
            lowerCamelCase__ = util.check_gold_parse_annotation(__lowerCAmelCase )
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        lowerCamelCase__ = evaluate(
            key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , )
        return score
| 29
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
# Map of pretrained REALM checkpoint names to their config URLs on the Hub.
_a = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        # BUG FIX: URL previously read "/aresolve/main/" — a typo that made
        # the link 404; corrected to the standard "/resolve/main/" path.
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class __A ( lowerCAmelCase ):
    '''Configuration class for REALM models (common, reader and retrieval settings).

    NOTE(review): the ``__init__`` parameter list repeats ``__lowerCAmelCase``
    (duplicate-argument SyntaxError) and the body reads names such as
    ``vocab_size`` that are never bound under those names — mangled by an
    automated rename; parameter order mirrors the attribute assignments below.
    '''
    lowerCAmelCase_ = """realm"""
    def __init__( self , __lowerCAmelCase=3_0_5_2_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=8 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu_new" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=1_0 , __lowerCAmelCase=1E-3 , __lowerCAmelCase=5 , __lowerCAmelCase=3_2_0 , __lowerCAmelCase=1_3_3_5_3_7_1_8 , __lowerCAmelCase=5_0_0_0 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
        '''Store transformer, reader and retrieval hyper-parameters.'''
        super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
        # Common config
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = retriever_proj_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = num_candidates
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = layer_norm_eps
        # Reader config
        lowerCamelCase__ = span_hidden_size
        lowerCamelCase__ = max_span_width
        lowerCamelCase__ = reader_layer_norm_eps
        lowerCamelCase__ = reader_beam_size
        lowerCamelCase__ = reader_seq_len
        # Retrieval config
        lowerCamelCase__ = num_block_records
        lowerCamelCase__ = searcher_beam_size
| 29
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_a = open # noqa: we just need to have a builtin inside this module to test it properly
| 29
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def lowerCAmelCase__(__snake_case ) -> Tuple:
    '''Rename original GLPN checkpoint state-dict keys to HF Transformers names.

    NOTE(review): assignment targets below are collapsed to one mangled
    local; in the intended code each replacement rebinds ``key`` (so the
    renames chain) and the final assignment stores ``value`` under the new
    key in ``new_state_dict`` — confirm against the original conversion
    script before running.
    '''
    lowerCamelCase__ = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('''module.encoder''' ):
            lowerCamelCase__ = key.replace('''module.encoder''' ,'''glpn.encoder''' )
        if key.startswith('''module.decoder''' ):
            lowerCamelCase__ = key.replace('''module.decoder''' ,'''decoder.stages''' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            lowerCamelCase__ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
            lowerCamelCase__ = key.replace(F'patch_embed{idx}' ,F'patch_embeddings.{int(__snake_case )-1}' )
        if "norm" in key:
            lowerCamelCase__ = key.replace('''norm''' ,'''layer_norm''' )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            lowerCamelCase__ = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
            lowerCamelCase__ = key.replace(F'layer_norm{idx}' ,F'layer_norm.{int(__snake_case )-1}' )
        if "layer_norm1" in key:
            lowerCamelCase__ = key.replace('''layer_norm1''' ,'''layer_norm_1''' )
        if "layer_norm2" in key:
            lowerCamelCase__ = key.replace('''layer_norm2''' ,'''layer_norm_2''' )
        if "block" in key:
            # replace for example block1 by block.0
            lowerCamelCase__ = key[key.find('''block''' ) + len('''block''' )]
            lowerCamelCase__ = key.replace(F'block{idx}' ,F'block.{int(__snake_case )-1}' )
        if "attn.q" in key:
            lowerCamelCase__ = key.replace('''attn.q''' ,'''attention.self.query''' )
        if "attn.proj" in key:
            lowerCamelCase__ = key.replace('''attn.proj''' ,'''attention.output.dense''' )
        if "attn" in key:
            lowerCamelCase__ = key.replace('''attn''' ,'''attention.self''' )
        if "fc1" in key:
            lowerCamelCase__ = key.replace('''fc1''' ,'''dense1''' )
        if "fc2" in key:
            lowerCamelCase__ = key.replace('''fc2''' ,'''dense2''' )
        if "linear_pred" in key:
            lowerCamelCase__ = key.replace('''linear_pred''' ,'''classifier''' )
        if "linear_fuse" in key:
            lowerCamelCase__ = key.replace('''linear_fuse.conv''' ,'''linear_fuse''' )
            lowerCamelCase__ = key.replace('''linear_fuse.bn''' ,'''batch_norm''' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            lowerCamelCase__ = key[key.find('''linear_c''' ) + len('''linear_c''' )]
            lowerCamelCase__ = key.replace(F'linear_c{idx}' ,F'linear_c.{int(__snake_case )-1}' )
        if "bot_conv" in key:
            lowerCamelCase__ = key.replace('''bot_conv''' ,'''0.convolution''' )
        if "skip_conv1" in key:
            lowerCamelCase__ = key.replace('''skip_conv1''' ,'''1.convolution''' )
        if "skip_conv2" in key:
            lowerCamelCase__ = key.replace('''skip_conv2''' ,'''2.convolution''' )
        if "fusion1" in key:
            lowerCamelCase__ = key.replace('''fusion1''' ,'''1.fusion''' )
        if "fusion2" in key:
            lowerCamelCase__ = key.replace('''fusion2''' ,'''2.fusion''' )
        if "fusion3" in key:
            lowerCamelCase__ = key.replace('''fusion3''' ,'''3.fusion''' )
        if "fusion" in key and "conv" in key:
            lowerCamelCase__ = key.replace('''conv''' ,'''convolutional_layer''' )
        if key.startswith('''module.last_layer_depth''' ):
            lowerCamelCase__ = key.replace('''module.last_layer_depth''' ,'''head.head''' )
        lowerCamelCase__ = value
    return new_state_dict
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Union[str, Any]:
    '''Split each fused key/value (kv) projection matrix into separate key and value weights.

    NOTE(review): the signature repeats ``__snake_case`` (duplicate-argument
    SyntaxError) and later statements read ``kv_weight``/``kv_bias`` that
    are bound to a mangled local; the halves are split at
    ``config.hidden_sizes[i]`` — first half = key, second half = value.
    '''
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            lowerCamelCase__ = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
            lowerCamelCase__ = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            lowerCamelCase__ = kv_weight[
                : config.hidden_sizes[i], :
            ]
            lowerCamelCase__ = kv_bias[: config.hidden_sizes[i]]
            lowerCamelCase__ = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            lowerCamelCase__ = kv_bias[config.hidden_sizes[i] :]
def lowerCAmelCase__() -> Optional[Any]:
    '''Download and return the standard COCO validation test image as a PIL image.

    BUG FIX: the URL was bound to a mangled local while ``requests.get`` was
    given the undefined name ``__snake_case`` and the function returned the
    undefined name ``image`` (NameError at runtime); consistent local names
    restored and ``stream=True`` made explicit so the raw response can be
    read by PIL.
    '''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url ,stream=True ).raw )
    return image
@torch.no_grad()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ,__snake_case=None ) -> int:
    '''Convert an original GLPN checkpoint to a HF GLPNForDepthEstimation model and optionally push it.

    NOTE(review): the signature repeats ``__snake_case`` (duplicate-argument
    SyntaxError) and locals are mangled — later statements read
    ``image_processor``, ``model``, ``outputs``, ``predicted_depth``,
    ``model_name``, ``push_to_hub`` and ``expected_shape`` that are never
    bound under those names here; confirm the intended parameters
    (checkpoint_path, pytorch_dump_folder_path, push_to_hub, model_name).
    '''
    lowerCamelCase__ = GLPNConfig(hidden_sizes=[64, 128, 320, 512] ,decoder_hidden_size=64 ,depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    lowerCamelCase__ = GLPNImageProcessor()
    # prepare image
    lowerCamelCase__ = prepare_img()
    lowerCamelCase__ = image_processor(images=__snake_case ,return_tensors='''pt''' ).pixel_values
    logger.info('''Converting model...''' )
    # load original state dict
    lowerCamelCase__ = torch.load(__snake_case ,map_location=torch.device('''cpu''' ) )
    # rename keys
    lowerCamelCase__ = rename_keys(__snake_case )
    # key and value matrices need special treatment
    read_in_k_v(__snake_case ,__snake_case )
    # create HuggingFace model and load state dict
    lowerCamelCase__ = GLPNForDepthEstimation(__snake_case )
    model.load_state_dict(__snake_case )
    model.eval()
    # forward pass
    lowerCamelCase__ = model(__snake_case )
    lowerCamelCase__ = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            lowerCamelCase__ = torch.tensor(
                [[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
        elif "kitti" in model_name:
            lowerCamelCase__ = torch.tensor(
                [[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
        else:
            raise ValueError(F'Unknown model name: {model_name}' )
        lowerCamelCase__ = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] ,__snake_case ,atol=1E-4 )
        print('''Looks ok!''' )
    # finally, push to hub if required
    if push_to_hub:
        logger.info('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(
            repo_path_or_name=Path(__snake_case ,__snake_case ) ,organization='''nielsr''' ,commit_message='''Add model''' ,use_temp_dir=__snake_case ,)
        image_processor.push_to_hub(
            repo_path_or_name=Path(__snake_case ,__snake_case ) ,organization='''nielsr''' ,commit_message='''Add image processor''' ,use_temp_dir=__snake_case ,)
# CLI entry point: parse arguments and run the GLPN checkpoint conversion.
# NOTE(review): the names look mangled — `parser.add_argument` and
# `args.checkpoint_path` read names that are never bound here (the results
# of ArgumentParser()/parse_args() are assigned to `_a`); confirm against
# the original script.
if __name__ == "__main__":
    _a = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    _a = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 29
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_a = logging.get_logger(__name__)
class __A :
    '''Process-wide holder for the currently selected parallel backend name.

    NOTE(review): elsewhere in this module the holder is read as
    ``ParallelBackendConfig.backend_name`` while the attribute here is named
    ``lowerCAmelCase_`` — names look mangled; confirm before relying on it.
    '''
    lowerCAmelCase_ = None
@experimental
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Tuple:
    '''Dispatch a nested map either to a multiprocessing pool or to joblib.

    Uses joblib only when a backend has been selected via the
    ``parallel_backend`` context manager below; otherwise falls back to the
    multiprocessing implementation.

    NOTE(review): the signature repeats ``__snake_case`` seven times — a
    duplicate-argument SyntaxError introduced by an automated rename;
    confirm the intended parameters (function, iterable, num_proc, types,
    disable_tqdm, desc, single_map_nested_func) against the original.
    '''
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
    return _map_with_joblib(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int:
    '''Split the iterable into contiguous slices and map them over a multiprocessing Pool.

    NOTE(review): signature repeats ``__snake_case`` (duplicate-argument
    SyntaxError) and locals are mangled — the loop reads ``num_proc``,
    ``div``, ``mod``, ``start``, ``end``, ``split_kwds``, ``disable_tqdm``
    and ``mapped`` that are never bound under those names here; confirm
    against the original implementation.
    '''
    lowerCamelCase__ = num_proc if num_proc <= len(__snake_case ) else len(__snake_case )
    lowerCamelCase__ = [] # We organize the splits ourselve (contiguous splits)
    for index in range(__snake_case ):
        lowerCamelCase__ = len(__snake_case ) // num_proc
        lowerCamelCase__ = len(__snake_case ) % num_proc
        # first `mod` slices get one extra element so the sizes differ by at most 1
        lowerCamelCase__ = div * index + min(__snake_case ,__snake_case )
        lowerCamelCase__ = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(__snake_case ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F'Error dividing inputs iterable among processes. '
            F'Total number of objects {len(__snake_case )}, '
            F'length: {sum(len(i[1] ) for i in split_kwds )}' )
    logger.info(
        F'Spawning {num_proc} processes for {len(__snake_case )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
    lowerCamelCase__ , lowerCamelCase__ = None, None
    if not disable_tqdm:
        # share a lock with tqdm so progress bars from workers do not interleave
        lowerCamelCase__ , lowerCamelCase__ = (RLock(),), tqdm.set_lock
    with Pool(__snake_case ,initargs=__snake_case ,initializer=__snake_case ) as pool:
        lowerCamelCase__ = pool.map(__snake_case ,__snake_case )
    logger.info(F'Finished {num_proc} processes' )
    lowerCamelCase__ = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F'Unpacked {len(__snake_case )} objects' )
    return mapped
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> List[str]:
    '''Map each object of the iterable through joblib using the configured backend.

    Progress bars are disabled because joblib controls worker dispatch.
    NOTE(review): signature repeats ``__snake_case`` (duplicate-argument
    SyntaxError) — mangled by an automated rename; confirm parameter order.
    '''
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=__snake_case ):
        return joblib.Parallel()(
            joblib.delayed(__snake_case )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowerCAmelCase__(__snake_case ) -> int:
    '''Context manager that routes parallel map work through a joblib backend.

    While the context is active, ``ParallelBackendConfig.backend_name`` holds
    the given backend name (read by the dispatcher above); it is reset to
    ``None`` on exit, even on error.

    BUG FIX: the body previously bound a mangled local and compared the
    undefined name ``backend_name`` (NameError); it now stores the argument
    on ``ParallelBackendConfig`` — the attribute the dispatcher reads — and
    restores it in ``finally``.
    '''
    ParallelBackendConfig.backend_name = __snake_case
    if __snake_case == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 29
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __A ( unittest.TestCase ):
    '''Tests for ``TvltProcessor`` combining an image processor and a feature extractor.

    NOTE(review): locals in this class look mangled — results are bound to
    ``lowerCamelCase__`` but later statements read ``processor``,
    ``feature_extractor``, ``image_processor`` etc., and the setup assigns to
    a local while other methods read ``self.checkpoint``/``self.tmpdirname``;
    confirm against the original test file before executing.
    '''
    def __lowerCamelCase ( self ):
        '''Set up the checkpoint name and a temporary working directory.'''
        lowerCamelCase__ = '''ZinengTang/tvlt-base'''
        lowerCamelCase__ = tempfile.mkdtemp()
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        '''Load the TVLT image processor from the test checkpoint.'''
        return TvltImageProcessor.from_pretrained(self.checkpoint , **__lowerCAmelCase )
    def __lowerCamelCase ( self , **__lowerCAmelCase ):
        '''Load the TVLT feature extractor from the test checkpoint.'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Remove the temporary directory.'''
        shutil.rmtree(self.tmpdirname )
    def __lowerCamelCase ( self ):
        '''Round-trip: save_pretrained then from_pretrained preserves component types.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_feature_extractor()
        lowerCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Processor output for audio matches the bare feature extractor.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_feature_extractor()
        lowerCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
        lowerCamelCase__ = np.ones([1_2_0_0_0] )
        lowerCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors='''np''' )
        lowerCamelCase__ = processor(audio=__lowerCAmelCase , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def __lowerCamelCase ( self ):
        '''Processor output for images matches the bare image processor.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_feature_extractor()
        lowerCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
        lowerCamelCase__ = np.ones([3, 2_2_4, 2_2_4] )
        lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        lowerCamelCase__ = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def __lowerCamelCase ( self ):
        '''Joint audio+image call returns all expected keys; empty call raises.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_feature_extractor()
        lowerCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
        lowerCamelCase__ = np.ones([1_2_0_0_0] )
        lowerCamelCase__ = np.ones([3, 2_2_4, 2_2_4] )
        lowerCamelCase__ = processor(audio=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowerCAmelCase ):
            processor()
    def __lowerCamelCase ( self ):
        '''model_input_names is the concatenation of both components' input names.'''
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_feature_extractor()
        lowerCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 29
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output container for the temporal transformer below.

    The forward pass below returns ``TransformerTemporalModelOutput(sample=...)``,
    so this class carries the hidden states after the temporal blocks.
    NOTE(review): name, base class and field reconstructed from that call site —
    the obfuscated original was `class __A(lowerCAmelCase)` with `lowerCAmelCase_ = 42`.
    """

    # Hidden states of shape (batch_size * num_frames, channels, height, width).
    sample: torch.FloatTensor
class __A(ModelMixin, ConfigMixin):
    """Transformer model applied along the temporal (frame) axis of video latents.

    NOTE(review): parameter and local names reconstructed — the obfuscated original
    used one shared name for every parameter (a SyntaxError) and for every local.
    Defaults are preserved from the original signature.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads=16,
        attention_head_dim=88,
        in_channels=None,
        out_channels=None,
        num_layers=1,
        dropout=0.0,
        norm_num_groups=32,
        cross_attention_dim=None,
        attention_bias=False,
        sample_size=None,
        activation_fn="geglu",
        norm_elementwise_affine=True,
        double_self_attention=True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict=True,
    ):
        """Apply self-attention over frames for each spatial location.

        Input is (batch * num_frames, channel, height, width); the residual is added
        back before returning.
        """
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        # (b*f, c, h, w) -> (b, c, f, h, w) for the group norm, then flatten the
        # spatial dims so attention runs over the frame axis per pixel.
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
| 29
| 1
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_a = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target) -> bool:
    """Return True when `source` deviates from `target` by strictly less than 1%.

    Renamed to match the call sites below; the obfuscated original declared two
    parameters with the same name (a SyntaxError).
    """
    return (abs(source - target) / target) < 0.01


lowerCAmelCase__ = is_apercent_close  # backward-compatible alias for the previous name
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    """Run `TestCommand` with save_infos on a dataset script directory and verify
    the regenerated dataset infos against expected values.

    NOTE(review): reconstructed — the obfuscated original reused one name for the
    parameter and every local; `dataset_loading_script_dir` is presumably a
    conftest fixture pointing at the dummy dataset script — confirm.
    """
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    # save_infos writes the dataset infos into the script dir's README.md.
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # Byte counts can drift slightly between versions; allow 1% tolerance.
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # Bug fix: the original bare comparison `result == expected` had no effect.
            assert result == expected
| 29
|
_a = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_a = [{"type": "code", "content": INSTALL_CONTENT}]
_a = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 29
| 1
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazily-resolved public API: submodule name -> exported symbols.
# Bug fix: the obfuscated original rebound a single `_a` for every list and then
# passed an undefined `_import_structure` to _LazyModule (NameError on import).
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Lazily-resolved public API: submodule name -> exported symbols.
# Bug fix: the obfuscated original rebound a single `_a` for every list and then
# passed an undefined `_import_structure` to _LazyModule (NameError on import).
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build an L-mode alpha mask: opaque (255) interior with a linear fade to 0
    over `overlap_pixels` at each border, except on sides listed in `remove_borders`
    ("l"/"r"/"t"/"b"), which stay fully opaque.

    Fixes from the obfuscated original: duplicate parameter names (SyntaxError),
    non-existent `np.uinta` dtype, and a mutable default argument.
    """
    remove_borders = [] if remove_borders is None else remove_borders
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    # Sides that keep their border don't need the extra shrink on that axis.
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    # linear_ramp fades from the opaque interior down to 0 at the outer edge.
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    """Clamp `n` into the inclusive range [smallest, largest].

    Renamed to match the call sites below; the obfuscated original declared three
    parameters with the same name (a SyntaxError).
    """
    return max(smallest, min(n, largest))
def clamp_rect(rect, min_xy, max_xy):
    """Clamp an (x0, y0, x1, y1) rectangle to the [min_xy, max_xy] coordinate box.

    `min_xy`/`max_xy` are (x, y) pairs. Renamed to match the call site below; the
    obfuscated original declared three parameters with the same name (SyntaxError)
    and the upstream names shadowed the `min`/`max` builtins.
    """
    return (
        clamp(rect[0], min_xy[0], max_xy[0]),
        clamp(rect[1], min_xy[1], max_xy[1]),
        clamp(rect[2], min_xy[0], max_xy[0]),
        clamp(rect[3], min_xy[1], max_xy[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    """Grow an (x0, y0, x1, y1) rectangle by `overlap` pixels on every side,
    clamped to the image bounds.

    Renamed to match the call site in `_process_tile`; the obfuscated original
    declared three parameters with the same name (a SyntaxError).
    """
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend an `original_slice`-wide strip of the (downscaled) original image to
    the left of `tile`, giving the upscaler horizontal context.

    Renamed to match the call site in `_process_tile`; the obfuscated original
    declared four parameters with the same name (a SyntaxError).
    """
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    # Context strip taken at `slice_x` from the original image resized to tile scale.
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    """Crop away the (4x-upscaled) context strip added by `squeeze_tile`.

    Renamed to match the call site in `_process_tile`; the obfuscated original
    declared two parameters with the same name (a SyntaxError).
    """
    # The strip was original_image_slice wide pre-upscale; the pipeline upscales 4x.
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def lowerCAmelCase__(n, d) -> int:
    """Round `n` down to the nearest multiple of `d`.

    Fixes the obfuscated original, which declared two parameters with the same
    name (a SyntaxError). The public name is kept as-is since no call site is
    visible in this file chunk.
    """
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Tile-by-tile wrapper around StableDiffusionUpscalePipeline for images too
    large to upscale in one pass; tiles are blended with feathered alpha masks.

    NOTE(review): names reconstructed — the obfuscated original used one shared
    name for every parameter (a SyntaxError) and was declared as `__A` while the
    `main()` below instantiates `StableDiffusionTiledUpscalePipeline`.
    """

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale one (x, y) tile and paste it into `final_image` with a feathered mask."""
        torch.manual_seed(0)  # deterministic noise per tile so seams stay consistent
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position (in tile coordinates) from which to take the context strip.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        # Keep outer image borders opaque; only feather interior seams.
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        """Upscale `image` 4x by processing it in `tile_size` tiles; `callback`
        (if given) receives {"progress": float, "image": PIL.Image} after each tile.
        """
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    """Demo: tiled 4x upscale of the diffusers library logo on CUDA.

    Fixes the obfuscated original: the function had to be named `main` (called by
    the guard below), the callback parameter was named inconsistently with its
    body (`obj`), and `final_image` was never bound.
    """
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(F'progress: {obj["progress"]:.4f}')
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 29
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class __A(OwlViTImageProcessor):
    """Deprecated alias for OwlViTImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # Warn once on construction, then defer entirely to the image processor.
        # Fixes the obfuscated original: *args/**kwargs shared one name (SyntaxError)
        # and the warning category had been replaced by an undefined name.
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29
| 1
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    """Output formats for the text-generation pipeline's postprocess step.

    Reconstructed: the obfuscated original gave all three members the same name
    (enum key reuse → TypeError) and the class is referenced as `ReturnType` below.
    """

    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __A(Pipeline):
    """Causal language-model text-generation pipeline.

    NOTE(review): reconstructed — in the obfuscated original every method was
    named `__lowerCamelCase` (later defs shadowed earlier ones) and parameters
    shared one name (SyntaxError). Method and parameter names below follow the
    visible call sites (`self.XL_PREFIX`, the keys written into the param dicts);
    confirm against the upstream TextGenerationPipeline.
    """

    # Dummy article prepended for XLNet/Transfo-XL, which need a long prompt for state.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split user kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            # The prefix consumes tokens; record how many so generation length can be adjusted.
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        """Parse arguments and tokenize, with a Transfo-XL-specific tweak."""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """Complete the given prompt(s); see the pipeline docs for kwargs."""
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        """Tokenize prefix+prompt; optionally truncate from the left ("hole") so
        the requested number of new tokens fits the model's max length."""
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Run generation and reshape the output to (batch, num_return_sequences, ...)."""
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        """Decode generated sequences into token ids, new text, or full text."""
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
| 29
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Generate and write the README.md model card for one allenai wmt16 model.

    Fixes the obfuscated original: four parameters shared one name (SyntaxError),
    the function name did not match its caller (`write_model_card`), and the
    locals (`texts`, `scores`, `pair`) referenced by the card template were
    never bound. `parents=True, exist_ok=True` reconstructed — confirm.
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = F'{src_lang}-{tgt_lang}'

    readme = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(F'Generating {path}')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
_a = Path(__file__).resolve().parent.parent.parent
_a = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_a = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 29
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module-level logger for this configuration module.
_a = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL.
# NOTE(review): this rebinds `_a`, clobbering the logger bound just above —
# the distinct original names were lost in the mechanical rename.
_a = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __A ( lowerCAmelCase ):
    """Configuration for a BlenderbotSmall-style encoder-decoder model.

    Stores the architecture hyperparameters; all constructor arguments are
    optional and default to the ``blenderbot_small-90M`` checkpoint values.
    """

    lowerCAmelCase_ = """blenderbot-small"""
    lowerCAmelCase_ = ["""past_key_values"""]
    lowerCAmelCase_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    # BUG FIX (review): every parameter was previously named ``__lowerCAmelCase``,
    # which is a duplicate-argument SyntaxError, and every attribute assignment
    # bound a throwaway local instead of ``self``.  Names are restored from the
    # documented parameter order whose defaults match the original positionals.
    def __init__(
        self,
        vocab_size=5_0_2_6_5,
        max_position_embeddings=5_1_2,
        encoder_layers=8,
        encoder_ffn_dim=2_0_4_8,
        encoder_attention_heads=1_6,
        decoder_layers=8,
        decoder_ffn_dim=2_0_4_8,
        decoder_attention_heads=1_6,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=5_1_2,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        """Populate the config attributes and forward token ids to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class __A ( lowerCAmelCase ):
    """ONNX export configuration for a BlenderbotSmall-style encoder-decoder model.

    Declares the dynamic-axis layout of inputs/outputs per export task and
    builds dummy inputs (including past key/values) used to trace the model.

    NOTE(review): local variables in this file were mechanically renamed to
    ``lowerCamelCase__``, so many assignments below no longer bind the names
    later statements read (e.g. ``common_inputs``, ``batch``, ``seqlen``).
    Code is kept byte-for-byte; verify against the upstream
    ``BlenderbotSmallOnnxConfig`` before relying on runtime behavior.
    """
    @property
    def __lowerCamelCase ( self ):
        """Input-name -> dynamic-axes mapping, specialized per export task."""
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase__ = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                # With cached past, the decoder consumes a single new token per step.
                lowerCamelCase__ = {0: '''batch'''}
                lowerCamelCase__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                lowerCamelCase__ = {0: '''batch''', 1: '''decoder_sequence'''}
                lowerCamelCase__ = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(__lowerCAmelCase , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            lowerCamelCase__ = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                lowerCamelCase__ , lowerCamelCase__ = self.num_layers
                for i in range(__lowerCAmelCase ):
                    lowerCamelCase__ = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    lowerCamelCase__ = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            lowerCamelCase__ = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
    @property
    def __lowerCamelCase ( self ):
        """Output-name -> dynamic-axes mapping; appends present key/value axes."""
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase__ = super().outputs
        else:
            lowerCamelCase__ = super(__lowerCAmelCase , self ).outputs
            if self.use_past:
                lowerCamelCase__ , lowerCamelCase__ = self.num_layers
                for i in range(__lowerCAmelCase ):
                    lowerCamelCase__ = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    lowerCamelCase__ = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
        """Build dummy encoder+decoder inputs (with optional past key/values)."""
        lowerCamelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # Generate decoder inputs
        lowerCamelCase__ = seq_length if not self.use_past else 1
        lowerCamelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        lowerCamelCase__ = dict(**__lowerCAmelCase , **__lowerCAmelCase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                lowerCamelCase__ , lowerCamelCase__ = common_inputs['''input_ids'''].shape
                lowerCamelCase__ = common_inputs['''decoder_input_ids'''].shape[1]
                lowerCamelCase__ , lowerCamelCase__ = self.num_attention_heads
                lowerCamelCase__ = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                lowerCamelCase__ = decoder_seq_length + 3
                lowerCamelCase__ = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                lowerCamelCase__ = torch.cat(
                    [common_inputs['''decoder_attention_mask'''], torch.ones(__lowerCAmelCase , __lowerCAmelCase )] , dim=1 )
                lowerCamelCase__ = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                lowerCamelCase__ , lowerCamelCase__ = self.num_layers
                lowerCamelCase__ = min(__lowerCAmelCase , __lowerCAmelCase )
                lowerCamelCase__ = max(__lowerCAmelCase , __lowerCAmelCase ) - min_num_layers
                lowerCamelCase__ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
                for _ in range(__lowerCAmelCase ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(__lowerCAmelCase ),
                            torch.zeros(__lowerCAmelCase ),
                            torch.zeros(__lowerCAmelCase ),
                            torch.zeros(__lowerCAmelCase ),
                        ) )
                # TODO: test this.
                lowerCamelCase__ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
                for _ in range(__lowerCAmelCase , __lowerCAmelCase ):
                    common_inputs["past_key_values"].append((torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) )
        return common_inputs
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
        """Build dummy decoder-only (causal LM) inputs with optional past."""
        lowerCamelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                lowerCamelCase__ , lowerCamelCase__ = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                lowerCamelCase__ = seqlen + 2
                lowerCamelCase__ , lowerCamelCase__ = self.num_layers
                lowerCamelCase__ , lowerCamelCase__ = self.num_attention_heads
                lowerCamelCase__ = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                lowerCamelCase__ = common_inputs['''attention_mask'''].dtype
                lowerCamelCase__ = torch.cat(
                    [common_inputs['''attention_mask'''], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 )
                lowerCamelCase__ = [
                    (torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(__lowerCAmelCase )
                ]
        return common_inputs
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
        """Build flat single-sequence dummy inputs by tokenizing unk-token strings."""
        lowerCamelCase__ = compute_effective_axis_dimension(
            __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        lowerCamelCase__ = tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
        lowerCamelCase__ = compute_effective_axis_dimension(
            __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
        # Generate dummy inputs according to compute batch and sequence
        lowerCamelCase__ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        lowerCamelCase__ = dict(tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase ) )
        return common_inputs
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
        """Dispatch dummy-input generation according to the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
        elif self.task == "causal-lm":
            lowerCamelCase__ = self._generate_dummy_inputs_for_causal_lm(
                __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
        else:
            lowerCamelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
        return common_inputs
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Flatten past key/values; seq2seq uses the base helper, causal-lm the past variant."""
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase__ = super()._flatten_past_key_values_(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        else:
            lowerCamelCase__ = super(__lowerCAmelCase , self )._flatten_past_key_values_(
                __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
| 29
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    """Deprecated alias for the Segformer image processor.

    Kept for backward compatibility only; instantiating it emits a
    deprecation warning and otherwise behaves exactly like the parent class.
    """

    def __init__( self , *args , **kwargs ):
        """Warn about the deprecation, then initialize the parent normally."""
        # BUG FIX (review): the warning category was previously the undefined
        # name `__lowerCAmelCase`; deprecation shims pass FutureWarning here.
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 29
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
# BUG FIX (review): the queue class below is declared as Generic[_T], so the
# TypeVar must be bound to the name `_T`; the mechanical rename had bound it
# only to `_a`, leaving `_T` undefined. `_a` is kept as a compatibility alias.
_T = TypeVar("_T")
_a = _T
class __A ( Generic[_T] ):
    """FIFO queue built from two LIFO stacks (amortized O(1) put/get)."""

    def __init__( self , __lowerCAmelCase = None ):
        """Seed the queue from an optional iterable, oldest item first.

        BUG FIX (review): both stacks were previously assigned to the same
        attribute name ``_stacka``, so the seed items were immediately
        discarded and retrieval degenerated to LIFO order.
        """
        self._stacka = list(__lowerCAmelCase or [] )  # incoming items (newest on top)
        self._stackb = []  # outgoing items (oldest on top)

    def __len__( self ):
        """Total number of queued items across both stacks."""
        return len(self._stacka ) + len(self._stackb )

    def __repr__( self ):
        """Render the queue contents from oldest to newest."""
        return F'Queue({tuple(self._stackb[::-1] + self._stacka )})'

    def put( self , __lowerCAmelCase ):
        """Enqueue one item."""
        self._stacka.append(__lowerCAmelCase )

    def get( self ):
        """Dequeue the oldest item; raise IndexError when the queue is empty."""
        if not self._stackb:
            # Drain the incoming stack so the oldest item ends up on top.
            while self._stacka:
                self._stackb.append(self._stacka.pop() )
        if not self._stackb:
            raise IndexError('''Queue is empty''' )
        return self._stackb.pop()

    # Preserve the original (mechanically renamed) method name; the second of
    # the two duplicate defs — the getter — was the surviving binding.
    __lowerCamelCase = get
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 29
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase__(
    graph ,
    v ,
    visited_forward ,
    visited_backward ,
    cst_fwd ,
    cst_bwd ,
    queue ,
    parent ,
    shortest_distance ,
) -> float | int:
    """Relax all edges out of ``v`` for one direction of bidirectional Dijkstra.

    Args:
        graph: adjacency mapping ``node -> [(neighbor, weight), ...]``.
        v: node being expanded in this direction.
        visited_forward: nodes already settled in this direction.
        visited_backward: nodes settled by the opposite search.
        cst_fwd: best known costs for this direction (mutated in place).
        cst_bwd: best known costs for the opposite direction.
        queue: priority queue of ``(cost, node)`` for this direction (mutated).
        parent: predecessor map for this direction (mutated in place).
        shortest_distance: best meet-in-the-middle distance found so far.

    Returns:
        The (possibly improved) shortest meeting distance.
    """
    # BUG FIX (review): the nine parameters all shared the name `__snake_case`
    # (a duplicate-argument SyntaxError), and the cost/parent/shortest-distance
    # updates assigned a dead local instead of their targets.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def lowerCAmelCase__(source , destination , graph_forward , graph_backward ) -> int:
    """Bidirectional Dijkstra shortest-path distance between two nodes.

    Runs two simultaneous Dijkstra searches (from ``source`` over
    ``graph_forward`` and from ``destination`` over the reversed-edge
    ``graph_backward``) and stops once the frontiers prove optimality.

    Returns:
        The shortest distance, ``0`` when source equals destination, or ``-1``
        when no path exists.
    """
    # BUG FIX (review): the four parameters shared the name `__snake_case`
    # (duplicate-argument SyntaxError), and every state initialisation below
    # bound the same throwaway local, leaving `queue_forward`, `cst_fwd`,
    # etc. undefined.  The relaxation step is nested here because the module's
    # helper shares this function's (renamed) name and would self-recurse.

    def _relax(graph , v , visited_same , visited_other , cst_same , cst_other , pq , parent , best ):
        # One relaxation sweep for a single search direction.
        for nxt, d in graph[v]:
            if nxt in visited_same:
                continue
            old_cost = cst_same.get(nxt , np.inf )
            new_cost = cst_same[v] + d
            if new_cost < old_cost:
                pq.put((new_cost, nxt) )
                cst_same[nxt] = new_cost
                parent[nxt] = v
            if nxt in visited_other and cst_same[v] + d + cst_other[nxt] < best:
                best = cst_same[v] + d + cst_other[nxt]
        return best

    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = _relax(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance )
        shortest_distance = _relax(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance )
        # Termination: once the two frontiers together cannot beat the best
        # meeting distance, the answer is final.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: node -> [[neighbor, edge_weight], ...].
_a = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# Backward (reversed-edge) adjacency list for the backward search.
# NOTE(review): `_a` is rebound here, clobbering the forward graph above —
# the distinct original names were lost in the mechanical rename.
_a = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 29
| 1
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __A ( _lowerCamelCase , unittest.TestCase ):
    """Tokenizer test-suite for MobileBERT (mirrors the BERT tokenization tests).

    NOTE(review): many call sites below pass ``A__``, a name defined nowhere in
    this file — apparently a casualty of the mechanical renaming (presumably
    the value being forwarded, e.g. the text or a boolean flag).  Code is kept
    byte-for-byte.
    """
    lowerCAmelCase_ = MobileBertTokenizer
    lowerCAmelCase_ = MobileBertTokenizerFast
    lowerCAmelCase_ = True
    lowerCAmelCase_ = True
    lowerCAmelCase_ = filter_non_english
    lowerCAmelCase_ = """google/mobilebert-uncased"""
    def __lowerCamelCase ( self ):
        """Write a small WordPiece vocab to a temp dir and prepare tokenizer list."""
        super().setUp()
        lowerCamelCase__ = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        lowerCamelCase__ = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Return a (raw input, expected normalized output) text pair."""
        lowerCamelCase__ = '''UNwant\u00E9d,running'''
        lowerCamelCase__ = '''unwanted, running'''
        return input_text, output_text
    def __lowerCamelCase ( self ):
        """Full tokenizer: tokenization and token-to-id conversion."""
        lowerCamelCase__ = self.tokenizer_class(self.vocab_file )
        lowerCamelCase__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(A__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
    def __lowerCamelCase ( self ):
        """Python and Rust tokenizers must agree on tokens and encodings."""
        if not self.test_rust_tokenizer:
            return
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = self.get_rust_tokenizer()
        lowerCamelCase__ = '''UNwant\u00E9d,running'''
        lowerCamelCase__ = tokenizer.tokenize(A__ )
        lowerCamelCase__ = rust_tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        lowerCamelCase__ = tokenizer.encode(A__ , add_special_tokens=A__ )
        lowerCamelCase__ = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
        self.assertListEqual(A__ , A__ )
        lowerCamelCase__ = self.get_rust_tokenizer()
        lowerCamelCase__ = tokenizer.encode(A__ )
        lowerCamelCase__ = rust_tokenizer.encode(A__ )
        self.assertListEqual(A__ , A__ )
        # With lower casing
        lowerCamelCase__ = self.get_tokenizer(do_lower_case=A__ )
        lowerCamelCase__ = self.get_rust_tokenizer(do_lower_case=A__ )
        lowerCamelCase__ = '''UNwant\u00E9d,running'''
        lowerCamelCase__ = tokenizer.tokenize(A__ )
        lowerCamelCase__ = rust_tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        lowerCamelCase__ = tokenizer.encode(A__ , add_special_tokens=A__ )
        lowerCamelCase__ = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
        self.assertListEqual(A__ , A__ )
        lowerCamelCase__ = self.get_rust_tokenizer()
        lowerCamelCase__ = tokenizer.encode(A__ )
        lowerCamelCase__ = rust_tokenizer.encode(A__ )
        self.assertListEqual(A__ , A__ )
    def __lowerCamelCase ( self ):
        """BasicTokenizer splits CJK characters into individual tokens."""
        lowerCamelCase__ = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def __lowerCamelCase ( self ):
        """BasicTokenizer with lower-casing enabled."""
        lowerCamelCase__ = BasicTokenizer(do_lower_case=A__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def __lowerCamelCase ( self ):
        """Lower-casing with accents kept."""
        lowerCamelCase__ = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def __lowerCamelCase ( self ):
        """Lower-casing with accents stripped."""
        lowerCamelCase__ = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def __lowerCamelCase ( self ):
        """Default accent handling under lower-casing."""
        lowerCamelCase__ = BasicTokenizer(do_lower_case=A__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def __lowerCamelCase ( self ):
        """Case-preserving basic tokenization."""
        lowerCamelCase__ = BasicTokenizer(do_lower_case=A__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def __lowerCamelCase ( self ):
        """Case-preserving with accents kept."""
        lowerCamelCase__ = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def __lowerCamelCase ( self ):
        """Case-preserving with accents stripped."""
        lowerCamelCase__ = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def __lowerCamelCase ( self ):
        """never_split tokens must pass through untouched."""
        lowerCamelCase__ = BasicTokenizer(do_lower_case=A__ , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def __lowerCamelCase ( self ):
        """WordpieceTokenizer splits into subwords, falling back to [UNK]."""
        lowerCamelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        lowerCamelCase__ = {}
        for i, token in enumerate(A__ ):
            lowerCamelCase__ = i
        lowerCamelCase__ = WordpieceTokenizer(vocab=A__ , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
    def __lowerCamelCase ( self ):
        """_is_whitespace helper classification."""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )
    def __lowerCamelCase ( self ):
        """_is_control helper classification."""
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )
    def __lowerCamelCase ( self ):
        """_is_punctuation helper classification."""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )
    def __lowerCamelCase ( self ):
        """Soft-hyphen-only input must tokenize to nothing (issue #340)."""
        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(A__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(A__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
    @slow
    def __lowerCamelCase ( self ):
        """build_inputs_with_special_tokens wraps sequences in [CLS]/[SEP]."""
        lowerCamelCase__ = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        lowerCamelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=A__ )
        lowerCamelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A__ )
        lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(A__ )
        lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
        assert encoded_sentence == [1_0_1] + text + [1_0_2]
        assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
    def __lowerCamelCase ( self ):
        """Offset mappings must line up with the produced tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                lowerCamelCase__ = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                lowerCamelCase__ = tokenizer_r.encode_plus(
                    A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
                lowerCamelCase__ = tokenizer_r.do_lower_case if hasattr(A__ , '''do_lower_case''' ) else False
                lowerCamelCase__ = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), '''Allen'''),
                        ((2_1, 2_3), '''##NL'''),
                        ((2_3, 2_4), '''##P'''),
                        ((2_5, 3_3), '''sentence'''),
                        ((3_3, 3_4), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), '''allen'''),
                        ((2_1, 2_3), '''##nl'''),
                        ((2_3, 2_4), '''##p'''),
                        ((2_5, 3_3), '''sentence'''),
                        ((3_3, 3_4), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def __lowerCamelCase ( self ):
        """Chinese characters: '##' prefixing depends on tokenize_chinese_chars."""
        lowerCamelCase__ = ['''的''', '''人''', '''有''']
        lowerCamelCase__ = ''''''.join(A__ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowerCamelCase__ = True
                lowerCamelCase__ = self.tokenizer_class.from_pretrained(A__ , **A__ )
                lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                lowerCamelCase__ = tokenizer_p.encode(A__ , add_special_tokens=A__ )
                lowerCamelCase__ = tokenizer_r.encode(A__ , add_special_tokens=A__ )
                lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(A__ )
                lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(A__ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(A__ , A__ )
                self.assertListEqual(A__ , A__ )
                lowerCamelCase__ = False
                lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                lowerCamelCase__ = self.tokenizer_class.from_pretrained(A__ , **A__ )
                lowerCamelCase__ = tokenizer_r.encode(A__ , add_special_tokens=A__ )
                lowerCamelCase__ = tokenizer_p.encode(A__ , add_special_tokens=A__ )
                lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(A__ )
                lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(A__ )
                # it is expected that only the first Chinese character is not preceded by "##".
                lowerCamelCase__ = [
                    F'##{token}' if idx != 0 else token for idx, token in enumerate(A__ )
                ]
                self.assertListEqual(A__ , A__ )
                self.assertListEqual(A__ , A__ )
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
    """Processor bundling a CLAP feature extractor with a Roberta tokenizer.

    ``__call__`` accepts text and/or raw audio and returns a single encoding
    combining the tokenizer and feature-extractor outputs.
    """

    lowerCAmelCase_ = """ClapFeatureExtractor"""
    lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

    def __init__( self , feature_extractor , tokenizer ):
        """Store the two sub-components on the processor mixin."""
        # BUG FIX (review): both parameters previously shared the name
        # ``__lowerCAmelCase`` — a duplicate-argument SyntaxError.
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns the tokenizer encoding (with ``input_features`` merged in when
        audio is also supplied), or a ``BatchEncoding`` of the audio features
        alone. Raises ValueError when neither input is provided.
        """
        # BUG FIX (review): the previous version assigned every intermediate to
        # the same throwaway local, so ``encoding``/``audio_features`` were
        # never defined and the audio features were never merged.
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Order-preserving union of tokenizer and feature-extractor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29
| 0
|
from math import pi
def lowerCAmelCase__(angle , radius ) -> float:
    """Return the arc length of a circle sector.

    Args:
        angle: central angle of the sector, in degrees.
        radius: radius of the circle.
    """
    # BUG FIX (review): both parameters were previously named ``__snake_case``
    # (a duplicate-argument SyntaxError) while the body read ``angle`` and
    # ``radius``; names are restored in the order the demo call supplies them.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    # BUG FIX (review): the demo previously called the undefined name
    # ``arc_length``; call the function by its actual (renamed) name.
    print(lowerCAmelCase__(90, 10))
| 701
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
    """Builds tiny DPR configs and dummy inputs for the TF DPR model tests.

    NOTE(review): the ``__init__`` parameters were mechanically renamed so
    they all share the name ``__lowerCAmelCase`` (a duplicate-argument
    SyntaxError in the original); the attribute names on the right-hand
    sides indicate the intended parameters. Code is kept byte-for-byte.
    """
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ):
        """Record the (tiny) model hyperparameters used by all tests."""
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = seq_length
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_input_mask
        lowerCamelCase__ = use_token_type_ids
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = num_labels
        lowerCamelCase__ = num_choices
        lowerCamelCase__ = scope
        lowerCamelCase__ = projection_dim
    def __lowerCamelCase ( self ):
        """Create random ids/masks/labels and a tiny DPR config."""
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ = None
        if self.use_token_type_ids:
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
        lowerCamelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Build a TFDPRContextEncoder and check the pooled output shape."""
        lowerCamelCase__ = TFDPRContextEncoder(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Build a TFDPRQuestionEncoder and check the pooled output shape."""
        lowerCamelCase__ = TFDPRQuestionEncoder(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Build a TFDPRReader and check start/end/relevance logit shapes."""
        lowerCamelCase__ = TFDPRReader(config=__lowerCAmelCase )
        lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def __lowerCamelCase ( self ):
        """Return (config, inputs_dict) for the common model-test harness."""
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) = config_and_inputs
        lowerCamelCase__ = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Common TF-DPR model tests driven by the shared tester mixins.

    NOTE(review): the base-class names `lowerCAmelCase` are not defined in this
    module — presumably mangled mixin names (TFModelTesterMixin and
    PipelineTesterMixin are imported at the top of the file); confirm.
    """

    # Model classes exercised by the common tests (only when TF is available).
    lowerCAmelCase_ = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline mapping used by the pipeline tester.
    lowerCAmelCase_ = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
    # NOTE(review): five distinct boolean test switches were collapsed onto the
    # single attribute name `lowerCAmelCase_`; only the last assignment survives
    # at class-creation time — the intended flag names need restoring.
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def __lowerCamelCase ( self ):
        """setUp: build the model tester and the config tester.

        NOTE(review): `TFDPRModelTester` and the config class `__lowerCAmelCase`
        are undefined at module scope, and both assignments target a local
        instead of `self.model_tester` / `self.config_tester` — confirm.
        """
        lowerCamelCase__ = TFDPRModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def __lowerCamelCase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        """Exercise the context-encoder head."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Exercise the question-encoder head."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """Exercise the reader head."""
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase )

    @slow
    def __lowerCamelCase ( self ):
        """Smoke-test `from_pretrained` for the first archive checkpoint of
        each DPR head."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        # NOTE(review): this loop is an exact duplicate of the one above —
        # probably a copy/paste slip.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFDPRReader.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class __A ( unittest.TestCase ):
    """Integration check: a pretrained DPR question encoder must reproduce
    known reference embedding values."""

    @slow
    def __lowerCamelCase ( self ):
        """Encode a fixed prompt and compare the first ten embedding dims."""
        encoder = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        # [CLS] hello, is my dog cute? [SEP]
        token_ids = tf.constant(
            [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] )
        embedding = encoder(token_ids )[0]  # embedding shape = (1, 768)
        # Reference values for the leading slice of the embedding.
        reference = tf.constant(
            [
                [
                    0.0323_6253,
                    0.1275_3335,
                    0.1681_8509,
                    0.0027_9786,
                    0.389_6933,
                    0.2426_4945,
                    0.217_8971,
                    -0.0233_5227,
                    -0.0848_1959,
                    -0.1432_4117,
                ]
            ] )
        self.assertTrue(numpy.allclose(embedding[:, :1_0].numpy() , reference.numpy() , atol=1E-4 ) )
| 29
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds a tiny LiLT config plus random inputs for the common model tests.

    The class is named ``LiltModelTester`` to match the reference made by the
    test suite below; the mangled original shadowed everything under ``__A``.
    Duplicate parameter names (a SyntaxError) and undefined ``UpperCAmelCase_``
    references have been restored to their real identifiers.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=2_4,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_0_0_0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, bbox, token_type_ids, input_mask,
        sequence_labels, token_labels) built from random tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinates must satisfy x0 <= x1, y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        """Build the tiny LiltConfig used by every check below."""
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Run the bare LiltModel with full, partial and minimal inputs and
        check the output shapes."""
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Run the token-classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Run the question-answering head and check start/end logit shapes."""
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared test harness: return (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class __A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test wiring for the LiLT models.

    The mangled original inherited from the undefined name ``snake_case__``;
    the three mixins imported at the top of this file are used instead, and the
    test methods are given ``test_*`` names so unittest can discover them.
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original collapsed two flags onto one attribute name;
    # the upstream test file disables fx tracing and pruning — confirm.
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # LiLT pipelines require bbox inputs the generic pipeline tests lack.
        return True

    def setUp(self):
        """Build the model tester and the config tester."""
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # The scrambled source dropped the assignment target; the intent is
            # to vary the position-embedding flavor on the shared config.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class __A ( unittest.TestCase ):
    """Integration test: the pretrained LiLT base model must reproduce known
    hidden-state values on a tiny fixed input."""

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 7_6_8])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        # Bug fix: the original used assertTrue(shape, expected_shape), which
        # treats the second argument as a message and always passes for a
        # non-empty shape; assertEqual actually compares the two shapes.
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 702
|
import string
from math import logaa
def term_frequency(term, document) -> int:
    """Return how many times `term` occurs as a whole word in `document`.

    Punctuation is stripped, newlines removed, and comparison is
    case-insensitive. (Original def repeated one parameter name twice — a
    SyntaxError — and read the undefined names `document`/`term`; the argument
    order follows the companion `document_frequency` helper.)
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term, corpus) -> tuple[int, int]:
    """Return (documents in `corpus` containing `term`, total document count).

    Documents are newline-separated; matching is case-insensitive with
    punctuation stripped. (Original def repeated one parameter name twice —
    a SyntaxError — and read the undefined names `corpus`/`term`.)
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(n, df, smoothing=False) -> float:
    """Return idf = log10(n / df) rounded to 3 decimals.

    With `smoothing`, returns 1 + log10(n / (1 + df)), which tolerates df == 0.
    Raises ZeroDivisionError for df == 0 (unsmoothed) and ValueError for n == 0.
    (`logaa` is the module's math import — a mangled name for log10. The
    original def repeated one parameter name three times — a SyntaxError.)
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + logaa(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(logaa(n / df), 3)
def tf_idf(tf, idf) -> float:
    """Return the tf-idf score (tf * idf) rounded to 3 decimal places.

    (Original def repeated one parameter name twice — a SyntaxError — and read
    the undefined names `tf`/`idf`.)
    """
    return round(tf * idf, 3)
| 29
| 0
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Mapping from key fragments in the original SAM checkpoints to the names used
# by the Hugging Face SamModel implementation.
# NOTE(review): the conversion helper below refers to this table as
# `KEYS_TO_MODIFY_MAPPING`, but it is bound to `_a` here — confirm the name.
_a = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}
def lowerCAmelCase__(__snake_case ) -> dict:
    """Rewrite the keys of an original SAM state dict to the Hugging Face
    naming scheme and return the rebuilt state dict.

    NOTE(review): this function is heavily name-mangled — `state_dict`,
    `KEYS_TO_MODIFY_MAPPING`, `layer_nb`, `key` and `model_state_dict` are read
    but never bound under those names (every assignment targets
    `lowerCamelCase__`), and several call arguments were replaced by the
    parameter name `__snake_case`. Restore the original bindings before use.
    """
    lowerCamelCase__ = {}
    # Pixel statistics live in the image processor, not in the model weights.
    state_dict.pop('''pixel_mean''' ,__snake_case )
    state_dict.pop('''pixel_std''' ,__snake_case )
    # Pattern matching the per-mask output MLP layers that need renumbering.
    lowerCamelCase__ = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                lowerCamelCase__ = key.replace(__snake_case ,__snake_case )
        if re.match(__snake_case ,__snake_case ):
            lowerCamelCase__ = int(re.match(__snake_case ,__snake_case ).group(2 ) )
            # Hypernetwork MLP renaming: 0 -> proj_in, 1 -> layers.0, 2 -> proj_out.
            if layer_nb == 0:
                lowerCamelCase__ = key.replace('''layers.0''' ,'''proj_in''' )
            elif layer_nb == 1:
                lowerCamelCase__ = key.replace('''layers.1''' ,'''layers.0''' )
            elif layer_nb == 2:
                lowerCamelCase__ = key.replace('''layers.2''' ,'''proj_out''' )
        lowerCamelCase__ = value
    # The shared positional embedding is exposed under a second key as well.
    lowerCamelCase__ = model_state_dict[
        '''prompt_encoder.shared_embedding.positional_embedding'''
    ]
    return model_state_dict
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case="ybelkada/segment-anything" ) -> None:
    """Download an original SAM checkpoint, convert it to a Hugging Face
    `SamModel`, and sanity-check the predicted IoU scores on a reference image.

    NOTE(review): all four parameters share the name `__snake_case` (a
    duplicate parameter list is a SyntaxError) — upstream they are
    (model_name, pytorch_dump_folder_path, push_to_hub, model_hub_id).
    Names such as `model_name`, `hf_model`, `processor`, `output` and `scores`
    are read below but never bound; every assignment targets
    `lowerCamelCase__`, and `replace_keys` is not defined under that name in
    this file. Restore the original bindings before running. Requires CUDA.
    """
    lowerCamelCase__ = hf_hub_download(__snake_case ,F'checkpoints/{model_name}.pth' )
    # Pick the vision backbone size from the checkpoint name.
    if "sam_vit_b" in model_name:
        lowerCamelCase__ = SamConfig()
    elif "sam_vit_l" in model_name:
        lowerCamelCase__ = SamVisionConfig(
            hidden_size=1024 ,num_hidden_layers=24 ,num_attention_heads=16 ,global_attn_indexes=[5, 11, 17, 23] ,)
        lowerCamelCase__ = SamConfig(
            vision_config=__snake_case ,)
    elif "sam_vit_h" in model_name:
        lowerCamelCase__ = SamVisionConfig(
            hidden_size=1280 ,num_hidden_layers=32 ,num_attention_heads=16 ,global_attn_indexes=[7, 15, 23, 31] ,)
        lowerCamelCase__ = SamConfig(
            vision_config=__snake_case ,)
    lowerCamelCase__ = torch.load(__snake_case ,map_location='''cpu''' )
    lowerCamelCase__ = replace_keys(__snake_case )
    lowerCamelCase__ = SamImageProcessor()
    lowerCamelCase__ = SamProcessor(image_processor=__snake_case )
    lowerCamelCase__ = SamModel(__snake_case )
    hf_model.load_state_dict(__snake_case )
    lowerCamelCase__ = hf_model.to('''cuda''' )
    lowerCamelCase__ = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
    lowerCamelCase__ = Image.open(requests.get(__snake_case ,stream=__snake_case ).raw ).convert('''RGB''' )
    # Reference prompt: one foreground point on the car.
    lowerCamelCase__ = [[[400, 650]]]
    lowerCamelCase__ = [[1]]
    lowerCamelCase__ = processor(images=np.array(__snake_case ) ,return_tensors='''pt''' ).to('''cuda''' )
    with torch.no_grad():
        lowerCamelCase__ = hf_model(**__snake_case )
    lowerCamelCase__ = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        # Expected IoU scores for the ViT-H checkpoint: no prompt, point
        # prompt, box prompt, then two-point prompt.
        assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
        lowerCamelCase__ = processor(
            images=np.array(__snake_case ) ,input_points=__snake_case ,input_labels=__snake_case ,return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            lowerCamelCase__ = hf_model(**__snake_case )
        lowerCamelCase__ = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
        lowerCamelCase__ = ((75, 275, 1725, 850),)
        lowerCamelCase__ = processor(images=np.array(__snake_case ) ,input_boxes=__snake_case ,return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            lowerCamelCase__ = hf_model(**__snake_case )
        lowerCamelCase__ = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
        # Test with 2 points and 1 image.
        lowerCamelCase__ = [[[400, 650], [800, 650]]]
        lowerCamelCase__ = [[1, 1]]
        lowerCamelCase__ = processor(
            images=np.array(__snake_case ) ,input_points=__snake_case ,input_labels=__snake_case ,return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            lowerCamelCase__ = hf_model(**__snake_case )
        lowerCamelCase__ = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
    # CLI entry point for the SAM checkpoint converter. The scrambled source
    # assigned parser/choices/args all to `_a`, leaving every later reference
    # (`parser.add_argument`, `choices=choices`, `args.model_name`) undefined.
    parser = argparse.ArgumentParser()
    # Checkpoint variants this converter knows how to handle.
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    args = parser.parse_args()
    # NOTE(review): `convert_sam_checkpoint` is not defined under that name in
    # this file — the converter above was mangled to `lowerCAmelCase__`; confirm.
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 703
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import wiring for the Funnel model package. The scrambled source bound
# everything to `_a`, yet line-final `_LazyModule(...)` reads
# `_import_structure` — restore the conventional names so the module imports.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 0
|
from __future__ import annotations
def lowerCAmelCase__(nums) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of `nums`.

    Returns 0 for an empty list (and never less than 0, since skipping every
    element is allowed). The scrambled original read the undefined names
    `nums` and `_lowercase`; the classic include/exclude recurrence is restored.
    """
    if not nums:
        return 0
    max_including = nums[0]  # best sum that uses the current element
    max_excluding = 0  # best sum that skips the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 704
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_a = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target) -> bool:
    """Return True when `source` is within 1% (relative to `target`) of it.

    The original def repeated one parameter name twice (a SyntaxError), read
    the undefined names `source`/`target`, carried a bogus `List[str]` return
    annotation, and was unreachable under its mangled name — the integration
    test below calls `is_apercent_close`.
    """
    return (abs(source - target) / target) < 0.0_1
@pytest.mark.integration
def lowerCAmelCase__(__snake_case ) -> None:
    """Run the `datasets` TestCommand on a dataset directory and verify that
    the regenerated README dataset infos match the expected PAN-X metadata.

    NOTE(review): `_TestCommandArgs` and `is_apercent_close` are used below but
    are not bound under those names in this file (their definitions above were
    mangled). Similarly `test_command`, `dataset_readme_path`, `dataset_infos`,
    `expected_dataset_infos`, `result` and `expected` are read but only
    `lowerCamelCase__` is ever assigned — the original local names need
    restoring. Upstream also passes all_configs=True, save_infos=True instead
    of reusing the dataset path for every field — confirm.
    """
    lowerCamelCase__ = _TestCommandArgs(dataset=__snake_case ,all_configs=__snake_case ,save_infos=__snake_case )
    lowerCamelCase__ = TestCommand(*__snake_case )
    test_command.run()
    lowerCamelCase__ = os.path.join(__snake_case ,'''README.md''' )
    assert os.path.exists(__snake_case )
    lowerCamelCase__ = DatasetInfosDict.from_directory(__snake_case )
    # Reference metadata for the default config of the dataset under test.
    lowerCamelCase__ = DatasetInfosDict(
        {
            '''default''': DatasetInfo(
                features=Features(
                    {
                        '''tokens''': Sequence(Value('''string''' ) ),
                        '''ner_tags''': Sequence(
                            ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
                        '''langs''': Sequence(Value('''string''' ) ),
                        '''spans''': Sequence(Value('''string''' ) ),
                    } ) ,splits=[
                    {
                        '''name''': '''train''',
                        '''num_bytes''': 2351563,
                        '''num_examples''': 10000,
                    },
                    {
                        '''name''': '''validation''',
                        '''num_bytes''': 238418,
                        '''num_examples''': 1000,
                    },
                ] ,download_size=3940680 ,dataset_size=2589981 ,)
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        lowerCamelCase__ , lowerCamelCase__ = getattr(dataset_infos['''default'''] ,__snake_case ), getattr(expected_dataset_infos['''default'''] ,__snake_case )
        if key == "num_bytes":
            # Byte counts may drift slightly between runs; allow a 1% tolerance.
            assert is_apercent_close(__snake_case ,__snake_case )
        elif key == "splits":
            assert list(__snake_case ) == list(__snake_case )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
        else:
            result == expected  # NOTE(review): bare comparison — missing `assert`?
| 29
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import wiring for the XLM model package. The scrambled source bound
# everything to `_a`, yet the final `_LazyModule(...)` call reads
# `_import_structure` — restore the conventional names so the module imports.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = 1_3
lowerCamelCase__ = 7
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 9_9
lowerCamelCase__ = 3_2
lowerCamelCase__ = 2
lowerCamelCase__ = 4
lowerCamelCase__ = 3_7
lowerCamelCase__ = '''gelu'''
lowerCamelCase__ = 0.1
lowerCamelCase__ = 0.1
lowerCamelCase__ = 5_1_2
lowerCamelCase__ = 1_6
lowerCamelCase__ = 2
lowerCamelCase__ = 0.02
lowerCamelCase__ = 3
lowerCamelCase__ = 4
lowerCamelCase__ = None
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = self.prepare_config_and_inputs()
lowerCamelCase__ = True
lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = True
lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase )
lowerCamelCase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
lowerCamelCase__ = model(__lowerCAmelCase )
lowerCamelCase__ = [input_ids, input_mask]
lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase )
# Also check the case where encoder outputs are not passed
lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase )
lowerCamelCase__ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase )
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    '''Common test suite for the TF ESM model family (model, masked LM, sequence
    and token classification heads).

    NOTE(review): the base classes `lowerCAmelCase` and several names below
    (`TFEsmModelTester`, `__lowerCAmelCase`) are not defined anywhere in this
    file — this chunk appears to be machine-mangled; presumably the bases were
    `TFModelTesterMixin, PipelineTesterMixin` and the tester is the `__A`
    helper class defined above. TODO confirm against the upstream file.
    '''
    # All model classes exercised by the common tests (empty tuple without TF).
    lowerCAmelCase_ = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    lowerCAmelCase_ = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common tester (head masking / ONNX export).
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    # NOTE(review): all methods below share the name `__lowerCamelCase`, so at
    # class-creation time only the *last* definition survives — a mangling
    # artifact; the originals were distinct `setUp`/`test_*` methods.
    def __lowerCamelCase ( self ):
        '''Set up the model tester and config tester helpers.'''
        lowerCamelCase__ = TFEsmModelTester(self )
        lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
    def __lowerCamelCase ( self ):
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()
    def __lowerCamelCase ( self ):
        '''Exercise the base model forward pass.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the model in decoder (cross-attention) mode.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the masked-LM head.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
    def __lowerCamelCase ( self ):
        '''Exercise the token-classification head.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
    @slow
    def __lowerCamelCase ( self ):
        '''Smoke-test loading a pretrained checkpoint from the hub.'''
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowerCamelCase ( self ):
        '''Skipped: embedding resizing is unsupported for protein models.'''
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __lowerCamelCase ( self ):
        '''Skipped: embedding resizing is unsupported for protein models.'''
        pass
    def __lowerCamelCase ( self ):
        '''Check input/output embedding accessors for every model class.

        The masked-LM head exposes its output bias as a dict of tf.Variables
        rather than a layer, hence the special-cased branch.
        '''
        lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(__lowerCAmelCase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                lowerCamelCase__ = model.get_bias()
                assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
                # NOTE(review): `name` is never assigned — mangling artifact;
                # presumably the dict returned by get_bias(). TODO confirm.
                for k, v in name.items():
                    assert isinstance(__lowerCAmelCase , tf.Variable )
            else:
                lowerCamelCase__ = model.get_output_embeddings()
                assert x is None
                lowerCamelCase__ = model.get_bias()
                assert name is None
@require_tf
class __A ( unittest.TestCase ):
    '''Integration tests comparing TF ESM outputs against reference values.

    NOTE(review): both methods share the name `__lowerCamelCase` (mangling
    artifact), so only the second survives at class-creation time; the inner
    `__lowerCAmelCase` references are likewise undefined — presumably the
    freshly built `input_ids` tensor. TODO confirm against upstream.
    '''
    @slow
    def __lowerCamelCase ( self ):
        '''Masked-LM head: check output shape and a 3x3 logits slice.'''
        lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        # Expected logits shape: (batch=1, seq_len=6, vocab=33).
        lowerCamelCase__ = [1, 6, 3_3]
        self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase )
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
    @slow
    def __lowerCamelCase ( self ):
        '''Base model: check a 3x3 slice of the last hidden state.'''
        lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        lowerCamelCase__ = model(__lowerCAmelCase )[0]
        # compare the actual values for a slice.
        lowerCamelCase__ = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 29
| 0
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Known model max lengths for pretrained CANINE checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2_048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
# The mangled version assigned all six values to the same name `_a`, leaving
# the names referenced by SPECIAL_CODEPOINTS below undefined; restored here.
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""Construct a CANINE tokenizer (i.e. a character splitter).

    Tokenization is simply `list(text)`: each Unicode character is a token
    whose id is its codepoint, with private-use codepoints reserved for the
    special tokens.

    Rebuilt from the mangled version, whose `__init__` repeated the same
    parameter name eight times (a SyntaxError) and referenced the undefined
    name `UpperCAmelCase_`.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2_0_4_8,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        """One id per Unicode codepoint."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Split text into individual characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A character's id is its Unicode codepoint."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(F'invalid token: \'{token}\'')

    def _convert_id_to_token(self, index: int) -> str:
        """Map special codepoints to their names, everything else via chr()."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(F'invalid id: {index}')

    def convert_tokens_to_string(self, tokens):
        """Concatenate character tokens back into a string."""
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
        if token_ids_a_pair is not None:
            result += token_ids_a_pair + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_a: List[int],
        token_ids_a_pair: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_a)) + [1]
        if token_ids_a_pair is not None:
            result += ([0] * len(token_ids_a_pair)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep) * [0]
        if token_ids_a_pair is not None:
            result += len(token_ids_a_pair + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """CANINE has no vocabulary file to save."""
        return ()
| 706
|
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True iff ``number`` is a prime (trial division up to sqrt).

    Restores the name ``is_prime`` used by the callers below (the mangled
    version named every function ``lowerCAmelCase__``).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Return all primes from 2 to ``n`` via the sieve of Eratosthenes.

    Fixes the mangled inner-loop assignment, which set a throwaway local
    instead of zeroing ``begin_list[j]``.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 to ``n`` by testing each with ``is_prime``."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list of factors.

    Fixes the mangled body, which appended the wrong variable and lost the
    function's own name referenced by later callers.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two primes whose sum equals the even ``number`` (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor via the Euclidean algorithm.

    The mangled version declared the same parameter name twice (a
    SyntaxError) and collapsed the three loop variables into one; restored.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German: kgV) of two positive ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime number (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between two primes ``p_number_1 < p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return the fraction reduced to lowest terms as (numerator, denominator)."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! for non-negative ``n`` (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    """Return the n-th Fibonacci number of the sequence 1, 1, 2, 3, 5, ...

    Restores the distinct accumulator names that were collapsed to a single
    variable by the mangling.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    fib_prev = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_prev
        fib_prev = tmp
    return ans
| 29
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The mangled version assigned both the logger and the archive map to `_a`,
# leaving `logger` (used by EsmConfig below) undefined; restored.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class for ESM models (incl. optional ESMFold settings).

    Rebuilt from the mangled version, whose `__init__` repeated the same
    parameter name nineteen times (a SyntaxError) and referenced the
    undefined name `_lowercase`.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_0_2_6,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            # Folding models additionally carry an EsmFoldConfig and a vocab list.
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')

    def to_dict(self):
        """Serialize to a dict, expanding the nested EsmFoldConfig if present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Settings for the ESMFold head attached to a folding EsmConfig.

    Field names restored; the mangled version assigned all defaults to the
    same class attribute, so the dataclass had no usable fields.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Normalize `trunk` to a TrunkConfig instance.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a dict, expanding the nested TrunkConfig."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Settings for the ESMFold trunk; validates head/state-dim consistency."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Normalize `structure_module` to a StructureModuleConfig instance.
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.')
        # NOTE(review): the two `x % x` checks below are vacuously true (kept
        # verbatim from the source); presumably the intent was
        # `sequence_state_dim % sequence_head_width` — TODO confirm upstream.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
                F' {self.sequence_state_dim} and {self.sequence_state_dim}.')
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
                F' {self.pairwise_state_dim} and {self.pairwise_state_dim}.')
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
        if self.dropout >= 0.4:
            raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.')

    def to_dict(self):
        """Serialize to a dict, expanding the nested StructureModuleConfig."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Settings for the ESMFold structure module (IPA head)."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize all fields to a plain dict."""
        return asdict(self)
def get_default_vocab_list() -> tuple:
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    Restores the function name (called by EsmConfig for folding models) and
    fixes the return annotation, which wrongly claimed ``int``.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 707
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence`` in place between ``start`` and ``end`` (inclusive).

    Implements the (deliberately inefficient) slowsort algorithm. The mangled
    version recursed via the name ``slowsort`` while being defined under a
    different name, so every call raised NameError; also restores the swap,
    which had been collapsed into assignments to a throwaway local.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # Move the larger of the two subrange maxima to the end, then recurse.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 29
| 0
|
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised when iterating a linked list that contains a cycle.

    The mangled version declared `class __A(__A)` — a self-referential base —
    while the raise/except sites below use the name `ContainsLoopError`.
    """

    pass
class Node:
    """Singly-linked-list node with cycle detection via iteration.

    Restores the class name used by the demo code below and the undefined
    argument in ``visited.append``.
    """

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        """Yield node data, raising ContainsLoopError if a cycle is revisited."""
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True iff following next_node pointers eventually revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    # Demo: the mangled version assigned every node to `_a` but printed
    # `root_node`, which was never defined; restored.
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    # Create a cycle: last node points back into the list.
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    # Duplicate *values* are fine — only revisiting the same node is a loop.
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 708
|
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Return simple interest: principal * rate * days.

    Raises ValueError for non-positive days/principal or a negative rate.
    """
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''')
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return compound interest: principal * ((1 + r)^n - 1).

    Restores the canonical name, which is called by ``apr_interest`` below.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return interest at an annual percentage rate, compounded daily (365/yr)."""
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    # Delegate to compound_interest with a daily rate over daily periods.
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 29
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_a = logging.getLogger(__name__)
@dataclass
class __A :
    """Command-line arguments selecting the pretrained model/config/tokenizer.

    NOTE(review): the attribute names were mangled to a single repeated name
    (`lowerCAmelCase_`) with no type annotations, so as written only the last
    assignment survives and @dataclass records no fields — restore the
    original field names/annotations before use.
    """

    # Path or hub id of the pretrained model (required).
    lowerCAmelCase_ = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    # Optional config name/path; defaults to the model path.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    # Optional tokenizer name/path; defaults to the model path.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    # Cache directory for downloaded weights.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    # Whether to freeze the encoder during training ("tp" typo is in the CLI help string).
    lowerCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Whether tp freeze the encoder."""} )
    # Whether to freeze the embeddings during training.
    lowerCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class __A :
    """Command-line arguments controlling the data inputs for train/eval/test.

    NOTE(review): the attribute names were mangled to one repeated name
    (`lowerCAmelCase_`) with no type annotations, so @dataclass records no
    fields as written — restore the original field names before use.
    """

    # Directory containing the task's data files.
    lowerCAmelCase_ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    # Task selector, e.g. "summarization" or "translation".
    lowerCAmelCase_ = field(
        default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
    # Max tokenized source length; longer sequences are truncated.
    lowerCAmelCase_ = field(
        default=1024 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # Max tokenized target length used for training.
    lowerCAmelCase_ = field(
        default=128 , metadata={
            """help""": (
                """The maximum total sequence length for target text after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # Max target length during validation; also overrides model.generate(max_length=...).
    lowerCAmelCase_ = field(
        default=142 , metadata={
            """help""": (
                """The maximum total sequence length for validation target text after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded. """
                """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
                """during ``evaluate`` and ``predict``."""
            )
        } , )
    # Max target length during test prediction.
    lowerCAmelCase_ = field(
        default=142 , metadata={
            """help""": (
                """The maximum total sequence length for test target text after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # Dataset size caps; -1 keeps every example.
    lowerCAmelCase_ = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
    lowerCAmelCase_ = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
    lowerCAmelCase_ = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
    # Language ids used only for translation tasks.
    lowerCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Source language id for translation."""} )
    lowerCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Target language id for translation."""} )
    # Beam count used for generation during evaluation.
    lowerCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """# num_beams to use for evaluation."""} )
    # Whether the loss ignores pad tokens only (requires config.pad_token_id).
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def lowerCAmelCase__(split ,metrics ,output_dir ) -> Optional[int]:
    """Log all `metrics` for `split` and save them to `<output_dir>/<split>_results.json`.

    Args:
        split: split label (e.g. "train", "val", "test") used in the log banner
            and in the output filename.
        metrics: mapping of metric name -> value.
        output_dir: directory that receives the JSON dump (via `save_json`).
    """
    # Fix: the original declared three parameters all named `__snake_case`
    # (duplicate parameter names are a SyntaxError) and then read an undefined
    # `__lowerCAmelCase` — restore the real names the body needs.
    logger.info(F'***** {split} metrics *****' )
    for key in sorted(metrics.keys() ):
        logger.info(F' {key} = {metrics[key]}' )
    save_json(metrics ,os.path.join(output_dir ,F'{split}_results.json' ) )
def lowerCAmelCase__() -> Tuple:
    """Fine-tuning entry point: parse args, build the seq2seq model, datasets
    and trainer, then run train / eval / predict as requested and return the
    aggregated metrics dict.

    NOTE(review): local names were mangled — values are bound to
    `lowerCamelCase__` but read back through `__lowerCAmelCase`, which is
    never defined, so this function cannot run as written; restore the
    original variable names before use.
    """
    # Parse (model, data, training) argument dataclasses from CLI or a JSON file.
    lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
    check_output_dir(__lowerCAmelCase )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,training_args.fpaa ,)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' ,__lowerCAmelCase )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCamelCase__ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    # Copy dropout/layerdrop overrides from the training args onto the config.
    lowerCamelCase__ = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ):
            assert hasattr(__lowerCAmelCase ,__lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
            setattr(__lowerCAmelCase ,__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
    lowerCamelCase__ = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    lowerCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path ,from_tf='''.ckpt''' in model_args.model_name_or_path ,config=__lowerCAmelCase ,cache_dir=model_args.cache_dir ,)
    # use task specific params
    use_task_specific_params(__lowerCAmelCase ,data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        lowerCamelCase__ = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase ,(MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
            lowerCamelCase__ = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            lowerCamelCase__ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(__lowerCAmelCase )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    lowerCamelCase__ = SeqaSeqDataset
    # Get datasets
    lowerCamelCase__ = (
        dataset_class(
            __lowerCAmelCase ,type_path='''train''' ,data_dir=data_args.data_dir ,n_obs=data_args.n_train ,max_target_length=data_args.max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '''''' ,)
        if training_args.do_train
        else None
    )
    lowerCamelCase__ = (
        dataset_class(
            __lowerCAmelCase ,type_path='''val''' ,data_dir=data_args.data_dir ,n_obs=data_args.n_val ,max_target_length=data_args.val_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '''''' ,)
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    lowerCamelCase__ = (
        dataset_class(
            __lowerCAmelCase ,type_path='''test''' ,data_dir=data_args.data_dir ,n_obs=data_args.n_test ,max_target_length=data_args.test_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '''''' ,)
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    lowerCamelCase__ = (
        build_compute_metrics_fn(data_args.task ,__lowerCAmelCase ) if training_args.predict_with_generate else None
    )
    lowerCamelCase__ = SeqaSeqTrainer(
        model=__lowerCAmelCase ,args=__lowerCAmelCase ,data_args=__lowerCAmelCase ,train_dataset=__lowerCAmelCase ,eval_dataset=__lowerCAmelCase ,data_collator=SeqaSeqDataCollator(
            __lowerCAmelCase ,__lowerCAmelCase ,model.config.decoder_start_token_id ,training_args.tpu_num_cores ) ,compute_metrics=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,)
    lowerCamelCase__ = {}
    # Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        lowerCamelCase__ = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        lowerCamelCase__ = train_result.metrics
        lowerCamelCase__ = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' ,__lowerCAmelCase ,training_args.output_dir )
            all_metrics.update(__lowerCAmelCase )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir ,'''trainer_state.json''' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        lowerCamelCase__ = trainer.evaluate(metric_key_prefix='''val''' )
        lowerCamelCase__ = data_args.n_val
        lowerCamelCase__ = round(metrics['''val_loss'''] ,4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' ,__lowerCAmelCase ,training_args.output_dir )
            all_metrics.update(__lowerCAmelCase )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        lowerCamelCase__ = trainer.predict(test_dataset=__lowerCAmelCase ,metric_key_prefix='''test''' )
        lowerCamelCase__ = test_output.metrics
        lowerCamelCase__ = data_args.n_test
        if trainer.is_world_process_zero():
            lowerCamelCase__ = round(metrics['''test_loss'''] ,4 )
            handle_metrics('''test''' ,__lowerCAmelCase ,training_args.output_dir )
            all_metrics.update(__lowerCAmelCase )
            if training_args.predict_with_generate:
                lowerCamelCase__ = tokenizer.batch_decode(
                    test_output.predictions ,skip_special_tokens=__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
                lowerCamelCase__ = lmap(str.strip ,__lowerCAmelCase )
                write_txt_file(__lowerCAmelCase ,os.path.join(training_args.output_dir ,'''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(__lowerCAmelCase ,os.path.join(training_args.output_dir ,'''all_results.json''' ) )
    return all_metrics
def lowerCAmelCase__(__snake_case ) -> List[Any]:
    """xla_spawn (TPU) entry point: ignores its process-index argument and
    delegates to main().

    NOTE(review): `main` is not defined under that name in this mangled file —
    the main function above was renamed during obfuscation.
    """
    main()


if __name__ == "__main__":
    main()
| 709
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCAmelCase__(func ) -> Union[str, Any]:
    """Decorator that times `func`: the returned wrapper runs `func` and
    returns the elapsed wall-clock seconds.

    The wrapped function's own result is deliberately discarded — this is a
    benchmarking helper.
    """
    def wrapper(*args ,**kwargs ):
        # Fix: the original declared `*__snake_case ,**__snake_case` —
        # duplicate parameter names are a SyntaxError.
        starttime = timeit.default_timer()
        func(*args ,**kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    # Fix: the original assigned `func.__name__` to a throwaway local; attach
    # it to the wrapper so the timed function keeps its name.
    wrapper.__name__ = func.__name__
    return wrapper
def lowerCAmelCase__(features ,num_examples=100 ,seq_shapes=None ) -> Optional[Any]:
    """Generate `num_examples` rows of random dummy data matching `features`.

    Args:
        features: a datasets.Features-like mapping of column name -> feature type.
        num_examples: number of rows to generate.
        seq_shapes: mapping of column name -> shape, required for Sequence columns.

    Returns:
        A list of (index, example_dict) pairs.
    """
    # Fix: the original declared three parameters all named `__snake_case`
    # (duplicate parameter names are a SyntaxError) while the body read the
    # real names — restore them and the mangled locals.
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v ,_ArrayXD ):
                # Fixed-shape array feature: random floats cast to the declared dtype.
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v ,datasets.Value ):
                if v.dtype == "string":
                    data = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
            elif isinstance(v ,datasets.Sequence ):
                # Unwrap nested Sequence features to reach the inner dtype.
                while isinstance(v ,datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def lowerCAmelCase__(dataset_path ,features ,num_examples=100 ,seq_shapes=None ) -> str:
    """Write `num_examples` rows of random dummy data to an Arrow file and
    load it back as a `datasets.Dataset`.

    NOTE(review): `generate_examples` is defined above in this module under a
    mangled name; restore that definition's name for this call to resolve.

    Raises:
        ValueError: if the writer reports a different number of rows than requested.
    """
    # Fix: the original declared four parameters with duplicate `__snake_case`
    # names (a SyntaxError) and bound every local to `lowerCamelCase__`.
    dummy_data = generate_examples(features ,num_examples=num_examples ,seq_shapes=seq_shapes )
    with ArrowWriter(features=features ,path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if num_final_examples != num_examples:
        raise ValueError(
            F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path ,info=datasets.DatasetInfo(features=features ) )
    return dataset
| 29
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Optional[Any]:
    """Convert an original mLUKE checkpoint into a HF LukeForMaskedLM model and
    MLukeTokenizer, verify a few outputs, and save both to disk.

    NOTE(review): the five parameters are all mangled to `__snake_case`
    (duplicate parameter names are a SyntaxError) and locals are bound to
    `lowerCamelCase__` but read back under their original names — this
    function cannot run as written. The original parameters were presumably
    (checkpoint_path, metadata_path, entity_vocab_path,
    pytorch_dump_folder_path, model_size) per the argparse block below.
    """
    with open(lowerCamelCase__ ) as metadata_file:
        lowerCamelCase__ = json.load(lowerCamelCase__ )
    lowerCamelCase__ = LukeConfig(use_entity_aware_attention=lowerCamelCase__ ,**metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    lowerCamelCase__ = torch.load(lowerCamelCase__ ,map_location='''cpu''' )["module"]
    # Load the entity vocab file
    lowerCamelCase__ = load_original_entity_vocab(lowerCamelCase__ )
    # add an entry for [MASK2]
    lowerCamelCase__ = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    lowerCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    lowerCamelCase__ = AddedToken('''<ent>''' ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ )
    lowerCamelCase__ = AddedToken('''<ent2>''' ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(lowerCamelCase__ )
    # Re-tag the saved tokenizer config as an MLukeTokenizer and write the entity vocab.
    with open(os.path.join(lowerCamelCase__ ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
        lowerCamelCase__ = json.load(lowerCamelCase__ )
    lowerCamelCase__ = "MLukeTokenizer"
    with open(os.path.join(lowerCamelCase__ ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
        json.dump(lowerCamelCase__ ,lowerCamelCase__ )
    with open(os.path.join(lowerCamelCase__ ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
        json.dump(lowerCamelCase__ ,lowerCamelCase__ )
    lowerCamelCase__ = MLukeTokenizer.from_pretrained(lowerCamelCase__ )
    # Initialize the embeddings of the special tokens
    lowerCamelCase__ = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    lowerCamelCase__ = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    lowerCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
    lowerCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
    lowerCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
    lowerCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        lowerCamelCase__ = state_dict[bias_name]
        lowerCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
        lowerCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
        lowerCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            lowerCamelCase__ = F'encoder.layer.{layer_index}.attention.self.'
            lowerCamelCase__ = state_dict[prefix + matrix_name]
            lowerCamelCase__ = state_dict[prefix + matrix_name]
            lowerCamelCase__ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    lowerCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
    lowerCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    lowerCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    lowerCamelCase__ = state_dict["entity_predictions.bias"]
    lowerCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    lowerCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
    lowerCamelCase__ = LukeForMaskedLM(config=lowerCamelCase__ ).eval()
    # Tied/derived weights are dropped before loading; tie_weights() restores them.
    state_dict.pop('''entity_predictions.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.bias''' )
    lowerCamelCase__ = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            lowerCamelCase__ = state_dict[key]
        else:
            lowerCamelCase__ = state_dict[key]
    lowerCamelCase__ = model.load_state_dict(lowerCamelCase__ ,strict=lowerCamelCase__ )
    if set(lowerCamelCase__ ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
    if set(lowerCamelCase__ ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    lowerCamelCase__ = MLukeTokenizer.from_pretrained(lowerCamelCase__ ,task='''entity_classification''' )
    lowerCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    lowerCamelCase__ = (0, 9)
    lowerCamelCase__ = tokenizer(lowerCamelCase__ ,entity_spans=[span] ,return_tensors='''pt''' )
    lowerCamelCase__ = model(**lowerCamelCase__ )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        lowerCamelCase__ = torch.Size((1, 33, 768) )
        lowerCamelCase__ = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowerCamelCase__ ,atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        lowerCamelCase__ = torch.Size((1, 1, 768) )
        lowerCamelCase__ = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            F' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowerCamelCase__ ,atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    lowerCamelCase__ = MLukeTokenizer.from_pretrained(lowerCamelCase__ )
    lowerCamelCase__ = "Tokyo is the capital of <mask>."
    lowerCamelCase__ = (24, 30)
    lowerCamelCase__ = tokenizer(lowerCamelCase__ ,entity_spans=[span] ,return_tensors='''pt''' )
    lowerCamelCase__ = model(**lowerCamelCase__ )
    lowerCamelCase__ = encoding["input_ids"][0].tolist()
    lowerCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    lowerCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(lowerCamelCase__ )
    lowerCamelCase__ = outputs.entity_logits[0][0].argmax().item()
    lowerCamelCase__ = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(lowerCamelCase__ ) )
    model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__(entity_vocab_path ) -> List[str]:
    """Parse the original JSON-lines entity vocab into a {name: id} mapping.

    Each input line is a JSON object with an ``id`` and a list of
    ``entities`` as [name, language] pairs. Special tokens map under their
    bare name; every other entity maps under "language:name".

    Args:
        entity_vocab_path: path to the JSON-lines entity vocabulary file.

    Returns:
        dict mapping entity key -> entity id.
    """
    # Fixes vs. the mangled original: the parameter was never used, SPECIAL_TOKENS
    # was assigned to a throwaway local, `json.loads` was applied to the path
    # variable instead of each line, and the file handle was never closed.
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    with open(entity_vocab_path ) as f:
        data = [json.loads(line ) for line in f]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # Special tokens are language-independent; keep only one entry.
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F'{language}:{entity_name}'] = entity_id
    return new_mapping
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion.
    # NOTE(review): names are mangled — the parser is bound to `_a` but used
    # as `parser`, the parsed args are bound to `_a` but used as `args`, and
    # `convert_luke_checkpoint` is defined above under a mangled name, so this
    # block cannot run as written.
    _a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    _a = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 710
|
def lowerCAmelCase__(grid ) -> int:
    """Return the minimum path sum from top-left to bottom-right of `grid`,
    moving only right or down.  The grid is modified in place.

    >>> lowerCAmelCase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7

    Raises:
        TypeError: if the grid (or its first row) is empty.
    """
    # Fixes vs. the mangled original: the parameter was named `__snake_case`
    # while the body read `grid`, and the helper `fill_row` did not exist
    # under that name (its def also had duplicate parameter names, a SyntaxError).
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    # First row is reachable only by moving right: prefix-sum it in place.
    for cell_n in range(1 ,len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    # Each later row depends only on itself and the completed row above.
    for row_n in range(1 ,len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row ,row_above )
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row ,row_above ) -> list:
    """Accumulate minimal path sums into `current_row` given the completed
    `row_above`; returns the mutated row."""
    current_row[0] += row_above[0]
    for cell_n in range(1 ,len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
    return current_row


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 29
| 0
|
'''simple docstring'''
def gcd(a ,b ) -> Any:
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm)."""
    # Fix: the original def had two parameters both named `__snake_case`
    # (a SyntaxError) and collapsed the tuple unpacking into a dead local.
    # It is named `gcd` here because the function below calls it by that name.
    while a != 0:
        a, b = b % a, a
    return b


def lowerCAmelCase__(a ,m ) -> Optional[int]:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm.

    Raises:
        ValueError: if gcd(a, m) != 1, i.e. no inverse exists.
    """
    # Fix: duplicate `__snake_case` parameter names (SyntaxError) and
    # tuple-unpacking targets collapsed to a single dead local.
    if gcd(a ,m ) != 1:
        msg = F'mod inverse of {a!r} and {m!r} does not exist'
        raise ValueError(msg )
    # Extended Euclid: (ua, ub, uc) and (va, vb, vc) track Bezout coefficients.
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        ua, ub, uc, va, vb, vc = va, vb, vc, ua - q * va, ub - q * vb, uc - q * vc
    return ua % m
| 711
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
    """Arguments selecting a GLUE task and controlling its data handling.

    NOTE(review): the attribute names are mangled to one repeated
    `lowerCAmelCase_` with no type annotations, so @dataclass records no
    fields as written — restore the original field names before use.
    """

    # GLUE task key; must be one of glue_processors.
    lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    # Directory containing the task's data files.
    lowerCAmelCase_ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    # Max tokenized sequence length; longer sequences are truncated.
    lowerCAmelCase_ = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # Whether to ignore any cached feature files.
    lowerCAmelCase_ = field(
        default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )

    def __lowerCamelCase ( self ):
        """Normalize the task name to lower case (mangled __post_init__).

        NOTE(review): the lowered value is bound to a throwaway local and lost
        as written — presumably it should be
        `self.task_name = self.task_name.lower()`.
        """
        lowerCamelCase__ = self.task_name.lower()
class __A ( lowerCAmelCase ):
    """Dataset split selector with values "train" / "dev" / "test".

    NOTE(review): the base class and member names are mangled (all three
    members share one name, so only the last survives); presumably this was
    an Enum — the dataset below indexes it with `Split[mode]` and reads
    `mode.value`.
    """

    lowerCAmelCase_ = """train"""
    lowerCAmelCase_ = """dev"""
    lowerCAmelCase_ = """test"""
class __A ( lowerCAmelCase ):
    """Deprecated GLUE dataset: tokenizes a task's examples and caches the
    resulting features on disk under a name derived from tokenizer/length/task.

    NOTE(review): names are heavily mangled — locals are bound to
    `lowerCamelCase__` but read back through `__lowerCAmelCase` (never
    defined), so this class cannot run as written.
    """

    # Mangled class-level attributes (all three share one name; only the last survives).
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
        """Build or load cached features for the given task/split.

        NOTE(review): the five constructor parameters are all mangled to the
        same name `__lowerCAmelCase` — duplicate parameter names are a
        SyntaxError; originally presumably
        (args, tokenizer, limit_length=None, mode=Split.train, cache_dir=None).
        """
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
        lowerCamelCase__ = args
        lowerCamelCase__ = glue_processors[args.task_name]()
        lowerCamelCase__ = glue_output_modes[args.task_name]
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            try:
                lowerCamelCase__ = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        # Load data features from cache or dataset file
        lowerCamelCase__ = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        lowerCamelCase__ = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
        lowerCamelCase__ = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCamelCase__ = cached_features_file + '''.lock'''
        with FileLock(__lowerCAmelCase ):
            if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
                lowerCamelCase__ = time.time()
                lowerCamelCase__ = torch.load(__lowerCAmelCase )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
                else:
                    lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    lowerCamelCase__ = examples[:limit_length]
                lowerCamelCase__ = glue_convert_examples_to_features(
                    __lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
                lowerCamelCase__ = time.time()
                torch.save(self.features , __lowerCAmelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )

    def __len__( self ):
        """Number of featurized examples."""
        return len(self.features )

    def __getitem__( self , __lowerCAmelCase ):
        """Return the features at index i (NOTE(review): `i` is not the
        mangled parameter name — restore the parameter before use)."""
        return self.features[i]

    def __lowerCamelCase ( self ):
        """Return the task's label list."""
        return self.label_list
| 29
| 0
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( UpperCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ = (DDPMScheduler,)
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_a )
return config
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_a )
def __lowerCamelCase ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __lowerCamelCase ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __lowerCamelCase ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def __lowerCamelCase ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __lowerCamelCase ( self ):
'''simple docstring'''
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=_a )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_a )
lowerCamelCase__ = len(_a )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
lowerCamelCase__ = model(_a , _a )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(_a ) )
lowerCamelCase__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase__ = scheduler_class(**_a )
lowerCamelCase__ = len(_a )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
lowerCamelCase__ = model(_a , _a )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(_a ) )
lowerCamelCase__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
    def __lowerCamelCase ( self ):
        '''Setting custom (descending) timesteps via ``set_timesteps(timesteps=...)``
        must round-trip: ``scheduler.previous_timestep(t)`` has to return the next
        entry of the custom schedule, and -1 after the final step.
        '''
        lowerCamelCase__ = self.scheduler_classes[0]
        lowerCamelCase__ = self.get_scheduler_config()
        lowerCamelCase__ = scheduler_class(**_a )
        # custom schedule, strictly descending as the scheduler requires
        lowerCamelCase__ = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=_a )
        lowerCamelCase__ = scheduler.timesteps
        for i, timestep in enumerate(_a ):
            if i == len(_a ) - 1:
                # last step: there is no previous timestep left
                lowerCamelCase__ = -1
            else:
                lowerCamelCase__ = timesteps[i + 1]
            lowerCamelCase__ = scheduler.previous_timestep(_a )
            lowerCamelCase__ = prev_t.item()
            self.assertEqual(_a , _a )
    def __lowerCamelCase ( self ):
        '''Custom timesteps that are not strictly descending must be rejected.'''
        lowerCamelCase__ = self.scheduler_classes[0]
        lowerCamelCase__ = self.get_scheduler_config()
        lowerCamelCase__ = scheduler_class(**_a )
        # 5_1 after 5_0 violates the required descending order
        lowerCamelCase__ = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(_a , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=_a )
    def __lowerCamelCase ( self ):
        '''Passing both ``num_inference_steps`` and custom ``timesteps`` must raise —
        the two ways of defining the schedule are mutually exclusive.'''
        lowerCamelCase__ = self.scheduler_classes[0]
        lowerCamelCase__ = self.get_scheduler_config()
        lowerCamelCase__ = scheduler_class(**_a )
        lowerCamelCase__ = [1_0_0, 8_7, 5_0, 1, 0]
        lowerCamelCase__ = len(_a )
        with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_a )
lowerCamelCase__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=_a )
| 712
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_a = datasets.logging.get_logger(__name__)
# The helper functions below log through the name ``logger``, which was never
# bound (only the mangled alias ``_a`` was); bind it explicitly.
logger = _a
_a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def lowerCAmelCase__(key_lines ,sys_lines ,NP_only=False ,remove_nested=False ,keep_singletons=True ,min_span=False ,doc="dummy_doc" ):
    '''Extract the per-document mention/cluster structures CoVal's evaluator needs.

    Returns a dict mapping the (single) document name to a tuple of
    (key clusters, system clusters, key->sys mention assignment,
    sys->key mention assignment).

    Fixed: all seven parameters were declared with the single name
    ``__snake_case`` (duplicate argument names are a SyntaxError) while the body
    read the real names, and each intermediate result was bound to one throwaway
    name, leaving every later reference (key_doc_lines, key_clusters, ...)
    undefined.
    '''
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    # mentions/clusters of the gold (key) annotation
    key_clusters , singletons_num = reader.get_doc_mentions(doc ,key_doc_lines[doc] ,keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters ,key_doc_lines[doc] ,NP_only ,min_span )
    # mentions/clusters of the system (predicted) annotation
    sys_clusters , singletons_num = reader.get_doc_mentions(doc ,sys_doc_lines[doc] ,keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # parse trees always come from the key file, mirroring the original flow
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters ,key_doc_lines[doc] ,NP_only ,min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters ,keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters ,keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters ,key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters ,sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            '''files, respectively''' )
    return doc_coref_infos
def lowerCAmelCase__(key_lines ,sys_lines ,metrics ,NP_only ,remove_nested ,keep_singletons ,min_span ):
    '''Compute recall/precision/F1 for each CoVal metric, plus the averaged CoNLL score.

    ``metrics`` is a list of (name, metric_fn) pairs; the CoNLL score is the mean
    F1 of muc, bcub and ceafe and is only reported when all three are present.

    Fixed: the seven parameters shared one mangled name (a SyntaxError) and the
    per-metric results were all bound to a throwaway local, leaving
    recall/precision/f1 undefined where they were read.
    '''
    doc_coref_infos = get_coref_infos(key_lines ,sys_lines ,NP_only ,remove_nested ,keep_singletons ,min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos ,metric ,beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            # these three metrics feed the averaged CoNLL score
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
        logger.info(
            name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,)
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F'CoNLL score: {conll:.2f}' )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def lowerCAmelCase__(key_lines ) -> bool:
    '''Return True when the key file carries gold parse annotation.

    Comment lines (starting with ``#``) are skipped; the first data line with
    more than 6 columns decides: gold parses are present iff its parse column
    (column index 5) is not ``-``.

    Fixed: the parameter was declared as ``__snake_case`` while the body read
    ``key_lines`` (NameError), and the flag/parse-column locals were bound to a
    mangled throwaway name while ``has_gold_parse``/``parse_col`` were read.
    '''
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
                else:
                    break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    '''CoVal coreference-evaluation metric wrapper for ``datasets``.

    Fixed: both methods were named ``__lowerCamelCase``, so the ``datasets.Metric``
    hooks ``_info``/``_compute`` were never found, and ``_compute`` declared six
    parameters that all shared one name (a SyntaxError) while its body read the
    real names.
    '''

    def _info( self ):
        '''Describe the metric: citation, input features (lists of CoNLL lines) and reference URLs.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' ) ),
                } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )

    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        '''Score ``predictions`` against ``references`` with the CoVal metric suite.'''
        all_metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            # MINA minimum spans require gold parse annotation in the references
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=all_metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 29
| 0
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowerCAmelCase__():
    '''Parse the command-line arguments of the image-generation script.

    Returns the argparse.Namespace with: pretrained_model_name_or_path (str,
    required), caption (str), images_num (int), seed (int), cuda_id (int).

    Fixed: every ``type=``/``default=``/``required=`` referenced the undefined
    name ``__UpperCAmelCase``, which made the function unusable; concrete types
    and values are restored from the help texts and downstream usage.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' ,'''--pretrained_model_name_or_path''' ,type=str ,default=None ,required=True ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,)
    parser.add_argument(
        '''-c''' ,'''--caption''' ,type=str ,default='''robotic cat with wings''' ,help='''Text used to generate images.''' ,)
    parser.add_argument(
        '''-n''' ,'''--images_num''' ,type=int ,default=4 ,help='''How much images to generate.''' ,)
    parser.add_argument(
        '''-s''' ,'''--seed''' ,type=int ,default=42 ,help='''Seed for random process.''' ,)
    parser.add_argument(
        '''-ci''' ,'''--cuda_id''' ,type=int ,default=0 ,help='''cuda_id.''' ,)
    args = parser.parse_args()
    return args
def lowerCAmelCase__(imgs ,rows ,cols ):
    '''Paste ``rows * cols`` equally-sized PIL images into one grid image.

    Raises ValueError when the number of images does not match rows*cols.

    Fixed: the three parameters were all declared as ``__snake_case`` (duplicate
    argument names are a SyntaxError) and the body referenced the undefined
    mangled name ``__UpperCAmelCase`` where it meant the images/loop variable.
    '''
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    w , h = imgs[0].size
    grid = Image.new('''RGB''' ,size=(cols * w, rows * h) )
    # kept from the original flow; the grid dimensions themselves are unused below
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        # fill row-major: column = i % cols, row = i // cols
        grid.paste(img ,box=(i % cols * w, i // cols * h) )
    return grid
def lowerCAmelCase__(pipeline ,prompt="robotic cat with wings" ,guidance_scale=7.5 ,num_inference_steps=50 ,num_images_per_prompt=1 ,seed=42 ,):
    '''Run the Stable Diffusion pipeline and return (grid image, list of images).

    Fixed: the six parameters shared one mangled name (a SyntaxError) and the
    body referenced the undefined ``__UpperCAmelCase`` for every argument;
    names are restored from the keyword usage and the defaults kept.
    '''
    # seed the generator on the pipeline's device for reproducible sampling
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt ,guidance_scale=guidance_scale ,num_inference_steps=num_inference_steps ,generator=generator ,num_images_per_prompt=num_images_per_prompt ,).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    # NOTE(review): ``image_grid`` matches the original call but is not defined
    # under that name in this file (the grid helper is mangled) — confirm wiring.
    grid = image_grid(images ,rows=_rows ,cols=num_images_per_prompt // _rows )
    return grid, images
# Script entry: parse CLI args, assemble the Stable Diffusion pipeline, load an
# optional INT8/quantized UNet checkpoint, generate images and save them.
# NOTE(review): every result is bound to the mangled name ``_a`` while later
# lines read the real names (args, tokenizer, text_encoder, vae, unet, pipeline,
# grid, images, dirname) — and ``parse_args``/``generate_images`` are not
# defined under those names in this file; confirm the wiring before running.
_a = parse_args()
# Load models and create wrapper for stable diffusion
_a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
_a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
_a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
_a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
_a = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# disable the safety checker: always report images as safe
_a = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # a neural-compressor checkpoint exists: load the quantized UNet onto the pipeline
    _a = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    # no quantized checkpoint: run the FP model on the requested CUDA device
    _a = unet.to(torch.device("cuda", args.cuda_id))
_a = pipeline.to(unet.device)
_a , _a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# save the grid next to the model, named after the caption
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
# and each individual image in a caption-named subdirectory
_a = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 713
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
# Module-level alias of the ``open`` builtin, used as a patch target by test_patching.py.
_a = open  # noqa: we just need to have a builtin inside this module to test it properly
| 29
| 0
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCAmelCase__(precision: int ) -> str:
    '''Return pi to ``precision`` significant digits via the Chudnovsky algorithm.

    Raises:
        TypeError: if ``precision`` is not an int.
        ValueError: if ``precision`` < 1.

    Fixed: the parameter was declared as ``__snake_case`` while the body read
    ``precision``; ``isinstance`` compared the function against itself; the
    ``getcontext().prec`` assignment and the ``exponential_term = 1``
    initialization were lost to the mangled throwaway binding; and
    ``factorial`` was called on the function name instead of ``k``.
    '''
    if not isinstance(precision ,int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    # carry `precision` significant digits through every Decimal operation
    getcontext().prec = precision
    # each Chudnovsky term contributes roughly 14 correct digits
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    multinomial_term = 1
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 ,num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    # drop the last (possibly rounded) digit so the result is trustworthy
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    # Fixed: the f-string referenced the undefined names ``n`` and ``pi`` —
    # the constant is bound to ``_a`` and the function above is ``lowerCAmelCase__``.
    _a = 50
    print(f"""The first {_a} digits of pi is: {lowerCAmelCase__(_a)}""")
| 714
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_a = logging.get_logger(__name__)
class __A :
'''simple docstring'''
lowerCAmelCase_ = None
@experimental
def lowerCAmelCase__(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ):
    '''Dispatch a parallel map either to a multiprocessing Pool (default) or to
    joblib, depending on whether a joblib backend name was configured.

    Fixed: all seven parameters were declared with the single name
    ``__snake_case`` (duplicate argument names are a SyntaxError) and that one
    name was forwarded seven times.
    '''
    if ParallelBackendConfig.backend_name is None:
        # no joblib backend registered -> plain multiprocessing
        return _map_with_multiprocessing_pool(
            function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func )
    # a backend was selected via the `parallel_backend` context manager
    return _map_with_joblib(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func )
def lowerCAmelCase__(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ):
    '''Map ``function`` over ``iterable`` with a multiprocessing Pool.

    The iterable is split into ``num_proc`` contiguous slices (capped at
    ``len(iterable)``); each worker runs ``single_map_nested_func`` on one
    slice and the per-worker results are flattened into a single list.

    Fixed: the seven parameters shared one mangled name (a SyntaxError) and the
    intermediate results were bound to a throwaway local while later lines read
    the real names (num_proc, split_kwds, mapped, ...).
    '''
    # never spawn more workers than there are items
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        # the first `mod` slices get one extra element each
        start = div * index + min(index ,mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F'Error dividing inputs iterable among processes. '
            F'Total number of objects {len(iterable )}, '
            F'length: {sum(len(i[1] ) for i in split_kwds )}' )
    logger.info(
        F'Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
    initargs , initializer = None, None
    if not disable_tqdm:
        # share tqdm's write lock with workers so progress bars don't clobber each other
        initargs , initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc ,initargs=initargs ,initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func ,split_kwds )
    logger.info(F'Finished {num_proc} processes' )
    # flatten the per-worker result lists back into one list
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F'Unpacked {len(mapped )} objects' )
    return mapped
def lowerCAmelCase__(function ,iterable ,num_proc ,types ,disable_tqdm ,desc ,single_map_nested_func ):
    '''Map ``function`` over ``iterable`` through the configured joblib backend.

    tqdm is disabled per item (joblib handles its own progress reporting).

    Fixed: the seven parameters shared the single mangled name ``__snake_case``
    (a SyntaxError); names are restored from the dispatcher's call order.
    '''
    # joblib is a soft dependency, imported only when this path is taken
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowerCAmelCase__(backend_name ):
    '''Context manager that selects the parallel backend by name for the
    duration of the ``with`` block (e.g. "spark" registers joblib-spark).

    Fixed: the parameter was declared as ``__snake_case`` while the body read
    ``backend_name``, raising NameError on entry.
    '''
    # NOTE(review): these two assignments look like they were meant to set
    # ParallelBackendConfig.backend_name (set on entry, reset in `finally`);
    # as written they only bind a throwaway local — confirm against upstream.
    lowerCamelCase__ = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        lowerCamelCase__ = None
| 29
| 0
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( __lowerCAmelCase ):
    '''Config-tester mixin: checks that the MobileNetV1 config exposes its
    model-specific attributes.

    NOTE(review): the base class ``__lowerCAmelCase`` and the argument
    ``_UpperCamelCase`` are undefined at module scope — presumably mangled
    references to ConfigTester and the config instance built above; confirm.
    '''

    def __lowerCamelCase ( self ):
        '''Instantiate the config from the stored kwargs and assert the
        MobileNet-specific fields (tf_padding, depth_multiplier) exist.'''
        lowerCamelCase__ = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(_UpperCamelCase , '''tf_padding''' ) )
        self.parent.assertTrue(hasattr(_UpperCamelCase , '''depth_multiplier''' ) )
class __A :
    '''Test helper that builds small MobileNetV1 configs/inputs and runs shape checks.

    Fixed: every ``__init__`` parameter was declared with the same name
    (``__lowerCAmelCase`` — duplicate argument names are a SyntaxError), method
    bodies referenced the undefined name ``_UpperCamelCase``, and all methods
    shared the name ``__lowerCamelCase`` while the sibling test class calls
    ``prepare_config_and_inputs``/``get_config``/``create_and_check_model``/
    ``create_and_check_for_image_classification``/``prepare_config_and_inputs_for_common``;
    names are restored accordingly.
    '''

    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , min_depth=8 , tf_padding=True , last_hidden_size=1024 , output_stride=32 , hidden_act="relu6" , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # the final hidden width scales with the depth multiplier
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs( self ):
        '''Build (config, pixel_values, labels, pixel_labels) with random tensors.'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config( self ):
        '''Return a MobileNetV1 config mirroring this tester's hyper-parameters.'''
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        '''Run the bare model and check the last-hidden-state shape.'''
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        '''Run the classification head and check the logits shape.'''
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        '''Adapter for the common ModelTesterMixin: returns (config, inputs_dict).'''
        config , pixel_values , labels , pixel_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __A ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    '''Common test suite for MobileNetV1 (bare model + image classification).

    NOTE(review): the mixin bases ``__lowerCAmelCase``, the helper-class names
    ``MobileNetVaModelTester``/``MobileNetVaConfigTester`` and the argument name
    ``_UpperCamelCase`` are undefined at module scope, and every method shares
    the mangled name ``__lowerCamelCase`` (only the last definition survives as
    an attribute) — these all look like mangling artifacts; confirm the wiring
    before relying on this suite running.
    '''

    # model classes / pipeline mapping exercised by the common mixins
    lowerCAmelCase_ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    lowerCAmelCase_ = (
        {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # feature flags consumed by the common mixins (resizing, attentions, ...)
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def __lowerCamelCase ( self ):
        '''Set up the model tester and config tester helpers.'''
        lowerCamelCase__ = MobileNetVaModelTester(self )
        lowerCamelCase__ = MobileNetVaConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )

    def __lowerCamelCase ( self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
    def __lowerCamelCase ( self ):
        '''Skipped: vision model, no input embeddings.'''
        pass

    @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
    def __lowerCamelCase ( self ):
        '''Skipped: vision model, no I/O embeddings.'''
        pass

    @unittest.skip(reason='''MobileNetV1 does not output attentions''' )
    def __lowerCamelCase ( self ):
        '''Skipped: architecture has no attention layers.'''
        pass

    def __lowerCamelCase ( self ):
        '''The forward signature's first argument must be ``pixel_values``.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ = model_class(_UpperCamelCase )
            lowerCamelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ = [*signature.parameters.keys()]
            lowerCamelCase__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _UpperCamelCase )

    def __lowerCamelCase ( self ):
        '''Shape-check the bare model via the tester helper.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase )

    def __lowerCamelCase ( self ):
        '''Every model class must expose 26 hidden states (one per stage).'''
        def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
            # NOTE(review): duplicate parameter names here are a SyntaxError —
            # mangling artifact; presumably (inputs_dict, config, model_class)
            lowerCamelCase__ = model_class(_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
            lowerCamelCase__ = outputs.hidden_states
            lowerCamelCase__ = 2_6
            self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )

        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ = True
            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ = True
            check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

    def __lowerCamelCase ( self ):
        '''Shape-check the image-classification head via the tester helper.'''
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )

    @slow
    def __lowerCamelCase ( self ):
        '''Loading the first pretrained checkpoint must succeed.'''
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ = MobileNetVaModel.from_pretrained(_UpperCamelCase )
            self.assertIsNotNone(_UpperCamelCase )
def lowerCAmelCase__():
    '''Load the standard COCO cats fixture image used by the integration test.

    Fixed: the loaded image was bound to a mangled throwaway name while the
    function returned the undefined name ``image``; the ``-> Optional[Any]``
    annotation also referenced unimported typing names and is dropped.
    '''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
    '''Slow integration test: run the pretrained MobileNetV1 classifier on the
    COCO cats fixture and compare a logits slice against reference values.

    NOTE(review): ``prepare_img`` and the argument name ``_UpperCamelCase`` are
    undefined at module scope — mangling artifacts; confirm before running.
    '''

    @cached_property
    def __lowerCamelCase ( self ):
        '''Default image processor for the checkpoint (None without vision deps).'''
        return (
            MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
        )

    @slow
    def __lowerCamelCase ( self ):
        '''End-to-end forward pass on the fixture image; check logits shape and values.'''
        lowerCamelCase__ = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(_UpperCamelCase )
        lowerCamelCase__ = self.default_image_processor
        lowerCamelCase__ = prepare_img()
        lowerCamelCase__ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ = model(**_UpperCamelCase )
        # verify the logits
        lowerCamelCase__ = torch.Size((1, 1_0_0_1) )
        self.assertEqual(outputs.logits.shape , _UpperCamelCase )
        # reference logits slice for this checkpoint/image
        lowerCamelCase__ = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(_UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
| 715
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __A ( lowerCAmelCase ):
    '''Output container for the temporal transformer model below.

    NOTE(review): the base-class reference ``lowerCAmelCase`` is undefined at
    module scope (presumably a mangled ``BaseOutput``), and ``lowerCAmelCase_ = 42``
    looks like a mangled field declaration (upstream uses an annotated tensor
    field); confirm against the original before relying on this dataclass.
    '''

    lowerCAmelCase_ = 42
class __A ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
    @register_to_config
    def __init__( self , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 8_8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = "geglu" , __lowerCAmelCase = True , __lowerCAmelCase = True , ):
        '''Build the temporal transformer: GroupNorm -> Linear proj-in ->
        N BasicTransformerBlocks -> Linear proj-out.

        NOTE(review): every parameter shares the mangled name ``__lowerCAmelCase``
        (duplicate argument names are a SyntaxError) and the sub-modules are all
        bound to one throwaway local instead of ``self`` attributes, although the
        ``forward`` below reads ``self.norm``; this looks like a mangled
        TransformerTemporalModel ``__init__`` (num_attention_heads=16,
        attention_head_dim=88, ...) — confirm against the original.
        '''
        super().__init__()
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = attention_head_dim
        # inner width of the transformer blocks = heads * head_dim
        lowerCamelCase__ = num_attention_heads * attention_head_dim
        lowerCamelCase__ = in_channels
        lowerCamelCase__ = torch.nn.GroupNorm(num_groups=__lowerCAmelCase , num_channels=__lowerCAmelCase , eps=1E-6 , affine=__lowerCAmelCase )
        lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
        # 3. Define transformers blocks
        lowerCamelCase__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dropout=__lowerCAmelCase , cross_attention_dim=__lowerCAmelCase , activation_fn=__lowerCAmelCase , attention_bias=__lowerCAmelCase , double_self_attention=__lowerCAmelCase , norm_elementwise_affine=__lowerCAmelCase , )
                for d in range(__lowerCAmelCase )
            ] )
        lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase = True , ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = hidden_states.shape
lowerCamelCase__ = batch_frames // num_frames
lowerCamelCase__ = hidden_states
lowerCamelCase__ = hidden_states[None, :].reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowerCamelCase__ = self.norm(__lowerCAmelCase )
lowerCamelCase__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = self.proj_in(__lowerCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowerCamelCase__ = block(
__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , timestep=__lowerCAmelCase , cross_attention_kwargs=__lowerCAmelCase , class_labels=__lowerCAmelCase , )
# 3. Output
lowerCamelCase__ = self.proj_out(__lowerCAmelCase )
lowerCamelCase__ = (
hidden_states[None, None, :]
.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowerCamelCase__ = hidden_states.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=__lowerCAmelCase )
| 29
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Fixture locations resolved relative to the tests root.
# NOTE(review): all three constants were renamed to the throwaway ``_a`` so
# only the last binding survives; the tests below reference the undefined
# placeholder ``_A`` where these paths were presumably used — restore the
# original distinct names from upstream before running.
_a : Optional[int] = get_tests_dir("fixtures")
_a : Optional[Any] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_a : Union[str, Any] = get_tests_dir("fixtures/dummy-config.json")
class __A ( unittest.TestCase ):
    """Tests for ``AutoFeatureExtractor`` loading, registration and remote code.

    NOTE(review): throughout this class the original arguments were mangled to
    ``_A``, a name never defined in this file — these tests raise NameError as
    written.  The intended values (fixture paths, expected classes, booleans
    for ``trust_remote_code``) must be restored from upstream; the comments
    below record what each call appears to expect.
    """
    def __lowerCamelCase ( self ):
        """Per-test setup; the counter's value is unused by the visible tests."""
        lowerCamelCase__ = 0
    def __lowerCamelCase ( self ):
        """Load a feature extractor straight from a hub model id."""
        lowerCamelCase__ = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
        # presumably assertIsInstance(extractor, Wav2Vec2FeatureExtractor) — TODO confirm
        self.assertIsInstance(_A , _A )
    def __lowerCamelCase ( self ):
        """Load from a local directory of saved fixture files."""
        lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(_A )
        self.assertIsInstance(_A , _A )
    def __lowerCamelCase ( self ):
        """config.json alone (without feature_extractor_type) must suffice."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase__ = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(_A ).to_dict()
            config_dict.pop('''feature_extractor_type''' )
            lowerCamelCase__ = WavaVecaFeatureExtractor(**_A )
            # save in new folder
            model_config.save_pretrained(_A )
            config.save_pretrained(_A )
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(_A )
            # make sure private variable is not incorrectly saved
            lowerCamelCase__ = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(_A , _A )
    def __lowerCamelCase ( self ):
        """Load from an explicit feature-extractor config file."""
        lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(_A )
        self.assertIsInstance(_A , _A )
    def __lowerCamelCase ( self ):
        """A non-existent model id raises a helpful error."""
        with self.assertRaisesRegex(
            _A , '''bert-base is not a local folder and is not a valid model identifier''' ):
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained('''bert-base''' )
    def __lowerCamelCase ( self ):
        """An invalid git revision raises a helpful error."""
        with self.assertRaisesRegex(
            _A , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(_A , revision='''aaaaaa''' )
    def __lowerCamelCase ( self ):
        """A repo lacking preprocessor_config.json raises a helpful error."""
        with self.assertRaisesRegex(
            _A , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def __lowerCamelCase ( self ):
        """Remote (dynamic) feature extractors require trust_remote_code=True."""
        with self.assertRaises(_A ):
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_A ):
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_A )
        lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(
            '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_A )
        self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(_A )
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(_A , trust_remote_code=_A )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
    def __lowerCamelCase ( self ):
        """Register a custom config/extractor pair with the auto classes."""
        try:
            AutoConfig.register('''custom''' , _A )
            AutoFeatureExtractor.register(_A , _A )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_A ):
                AutoFeatureExtractor.register(_A , _A )
            # Now that the config is registered, it can be used as any other config with the auto-API
            lowerCamelCase__ = CustomFeatureExtractor.from_pretrained(_A )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(_A )
                lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(_A )
                self.assertIsInstance(_A , _A )
        finally:
            # Always undo the registration so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def __lowerCamelCase ( self ):
        """Local definitions take precedence unless trust_remote_code=True."""
        class __A ( a__ ):
            '''Locally-registered extractor that flags itself via ``is_local``.'''
            lowerCAmelCase_ = True
        try:
            AutoConfig.register('''custom''' , _A )
            AutoFeatureExtractor.register(_A , _A )
            # If remote code is not set, the default is to use local
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_A )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_A )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(not hasattr(_A , '''is_local''' ) )
        finally:
            # Always undo the registration so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 716
|
# Snippet injected at the top of generated doc notebooks to install dependencies.
# BUG FIX: the list below referenced ``INSTALL_CONTENT``, which was never
# defined (its assignment had been renamed to ``_a``), so importing this
# module raised NameError.  ``INSTALL_CONTENT`` is now defined explicitly.
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_a = INSTALL_CONTENT
# First cell(s) of every generated notebook.
_a = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder substitutions applied when style-checking documentation code.
_a = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 29
| 0
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCAmelCase__(monkeypatch ) -> str:
    """Reset the set of already-emitted deprecation warnings before a test.

    BUG FIX: the parameter had been renamed to ``__snake_case`` while the body
    kept using ``monkeypatch`` (NameError); pytest also injects fixtures by
    parameter name, so the parameter must literally be called ``monkeypatch``.
    """
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' ,set() )
@pytest.fixture
def lowerCAmelCase__(__snake_case ) -> Any:
    """Patch ``datasets.inspect.huggingface_hub`` with a mock hub client.

    NOTE(review): this fixture is garbled and raises NameError as written —
    the parameter should be pytest's ``monkeypatch`` (used in the body), the
    two local classes were presumably named ``MetricMock`` and ``HfhMock``
    (referenced below but undefined), and the comprehension argument ``_A``
    should be ``metric_id``.  Restore from upstream before running.
    """
    class __A :
        '''Mock of a single hub metric entry; stores only its id.'''
        def __init__( self , __lowerCAmelCase ):
            '''Record the metric id (assignment target garbled upstream).'''
            lowerCamelCase__ = metric_id
    class __A :
        '''Mock hub client exposing a fixed list of metrics.'''
        lowerCAmelCase_ = [MetricMock(_A ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
        def __lowerCamelCase ( self ):
            '''Return the canned metric list.'''
            return self._metrics
    monkeypatch.setattr('''datasets.inspect.huggingface_hub''' ,HfhMock() )
@pytest.mark.parametrize(
    '''func, args''' ,[(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> List[Any]:
    """Each deprecated metric entry point should emit the evaluate-migration warning.

    NOTE(review): garbled and unrunnable as written — five parameters share
    the name ``__snake_case`` (duplicate argument names are a SyntaxError);
    the parametrize ids show the first two should be ``func`` and ``args``,
    and the body also needs pytest's ``tmp_path`` plus the two mock fixtures
    above.  ``_lowerCamelCase`` is undefined (presumably the rewritten args
    tuple and the warning category, e.g. FutureWarning — TODO confirm).
    """
    if "tmp_path" in args:
        lowerCamelCase__ = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(_lowerCamelCase ,match='''https://huggingface.co/docs/evaluate''' ):
        func(*_lowerCamelCase )
| 717
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

# BUG FIX: the code below referenced ``_import_structure`` (in _LazyModule at
# the bottom), but every assignment building it had been renamed to the
# throwaway ``_a`` — importing this package raised NameError.  The standard
# transformers lazy-module pattern is restored.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
# PyTorch-backed symbols are only declared when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
# Flax-backed symbols are only declared when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
| 0
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:  # picklable for multiprocessing
    """Return ``__snake_case.sum()`` (e.g. for a NumPy array).

    BUG FIX: the body referenced an undefined ``x``; it now uses the parameter.
    """
    return __snake_case.sum()
def lowerCAmelCase__(__snake_case ) -> List[Any]:  # picklable for multiprocessing
    """Return the successor of ``__snake_case``.

    BUG FIX: the body referenced an undefined ``i``; it now uses the parameter.
    """
    return __snake_case + 1
@dataclass
class __A :
    """Simple record used by the asdict tests below.

    NOTE(review): both fields were garbled to ``lowerCAmelCase_ = 42`` so the
    second assignment shadows the first; the construction at the asdict test
    (``A(x=1 ,y='foobar')``) suggests the fields were ``x: int`` and
    ``y: str`` — restore from upstream before running.
    """
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
class __A ( _UpperCAmelCase ):
    """Tests for ``map_nested``, ``zip_dict`` and ``temporary_assignment``.

    NOTE(review): every call argument in this class was garbled to the
    undefined name ``lowerCamelCase_`` (NameError as written); the locals it
    should refer to were themselves all rebound to ``lowerCamelCase__``, so
    the intended pairings must be restored from upstream.
    """
    def __lowerCamelCase ( self ):
        """map_nested applies a function through dicts/lists, optionally with processes."""
        # Inputs: empty dict/list, scalar, list, flat dict, dict of lists,
        # nested dict, and a wider dict — followed by the expected outputs
        # for a +1 mapping over each of them.
        lowerCamelCase__ = {}
        lowerCamelCase__ = []
        lowerCamelCase__ = 1
        lowerCamelCase__ = [1, 2]
        lowerCamelCase__ = {'''a''': 1, '''b''': 2}
        lowerCamelCase__ = {'''a''': [1, 2], '''b''': [3, 4]}
        lowerCamelCase__ = {'''a''': {'''1''': 1}, '''b''': 2}
        lowerCamelCase__ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
        lowerCamelCase__ = {}
        lowerCamelCase__ = []
        lowerCamelCase__ = 2
        lowerCamelCase__ = [2, 3]
        lowerCamelCase__ = {'''a''': 2, '''b''': 3}
        lowerCamelCase__ = {'''a''': [2, 3], '''b''': [4, 5]}
        lowerCamelCase__ = {'''a''': {'''1''': 2}, '''b''': 3}
        lowerCamelCase__ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
        # Single-process mapping over each input shape.
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
        # Same mapping but with a worker pool.
        lowerCamelCase__ = 2
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        # NumPy containers with/without map_numpy.
        lowerCamelCase__ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
        lowerCamelCase__ = {'''a''': 2, '''b''': 0, '''c''': 2}
        lowerCamelCase__ = {
            '''a''': np.eye(2 ).astype(lowerCamelCase_ ),
            '''b''': np.zeros(3 ).astype(lowerCamelCase_ ),
            '''c''': np.ones(2 ).astype(lowerCamelCase_ ),
        }
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , map_numpy=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCamelCase_ , lowerCamelCase_ , map_numpy=lowerCamelCase_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , map_numpy=lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCamelCase_ , lowerCamelCase_ , map_numpy=lowerCamelCase_ , num_proc=lowerCamelCase_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(lowerCamelCase_ ):  # can't pickle a local lambda
            map_nested(lambda __lowerCAmelCase : x + 1 , lowerCamelCase_ , num_proc=lowerCamelCase_ )
    def __lowerCamelCase ( self ):
        """zip_dict yields (key, tuple-of-values) across several dicts."""
        lowerCamelCase__ = {'''a''': 1, '''b''': 2}
        lowerCamelCase__ = {'''a''': 3, '''b''': 4}
        lowerCamelCase__ = {'''a''': 5, '''b''': 6}
        lowerCamelCase__ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) , lowerCamelCase_ )
    def __lowerCamelCase ( self ):
        """temporary_assignment restores the attribute when the context exits."""
        class __A :
            '''Holder with a single class attribute to swap temporarily.'''
            lowerCAmelCase_ = """bar"""
        lowerCamelCase__ = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(lowerCamelCase_ , '''my_attr''' , '''BAR''' ):
            self.assertEqual(foo.my_attr , '''BAR''' )
        self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
    '''iterable_length, num_proc, expected_num_proc''' ,[
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] ,)
def lowerCAmelCase__(iterable_length ,num_proc ,expected_num_proc ) -> str:
    """map_nested should only spawn a pool when the input is large enough.

    BUG FIX: all three parameters were named ``__snake_case`` (duplicate
    argument names are a SyntaxError) and the body used the undefined
    placeholders ``a_``/``x``; names are restored from the parametrize ids
    and the body's own references.
    """
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        data_struct = {F'{i}': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 ,data_struct ,num_proc=num_proc ,parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __A ( _UpperCAmelCase ):
    """Tests for ``temp_seed`` determinism across TF, PyTorch and NumPy.

    NOTE(review): call arguments were garbled to the undefined name
    ``lowerCamelCase_`` (NameError as written) — from context these stand in
    for the framework flag (presumably True), the random input tensor, and
    the two outputs being compared; restore from upstream before running.
    """
    @require_tf
    def __lowerCamelCase ( self ):
        """Same seed ⇒ same TF output; fresh RNG state ⇒ different output."""
        import tensorflow as tf
        from tensorflow.keras import layers
        lowerCamelCase__ = layers.Dense(2 )
        def gen_random_output():
            lowerCamelCase__ = tf.random.uniform((1, 3) )
            return model(lowerCamelCase_ ).numpy()
        with temp_seed(4_2 , set_tensorflow=lowerCamelCase_ ):
            lowerCamelCase__ = gen_random_output()
        with temp_seed(4_2 , set_tensorflow=lowerCamelCase_ ):
            lowerCamelCase__ = gen_random_output()
        lowerCamelCase__ = gen_random_output()
        np.testing.assert_equal(lowerCamelCase_ , lowerCamelCase_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    @require_torch
    def __lowerCamelCase ( self ):
        """Same seed ⇒ same PyTorch output; fresh RNG state ⇒ different output."""
        import torch
        def gen_random_output():
            lowerCamelCase__ = torch.nn.Linear(3 , 2 )
            lowerCamelCase__ = torch.rand(1 , 3 )
            return model(lowerCamelCase_ ).detach().numpy()
        with temp_seed(4_2 , set_pytorch=lowerCamelCase_ ):
            lowerCamelCase__ = gen_random_output()
        with temp_seed(4_2 , set_pytorch=lowerCamelCase_ ):
            lowerCamelCase__ = gen_random_output()
        lowerCamelCase__ = gen_random_output()
        np.testing.assert_equal(lowerCamelCase_ , lowerCamelCase_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def __lowerCamelCase ( self ):
        """Same seed ⇒ same NumPy output; fresh RNG state ⇒ different output."""
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(4_2 ):
            lowerCamelCase__ = gen_random_output()
        with temp_seed(4_2 ):
            lowerCamelCase__ = gen_random_output()
        lowerCamelCase__ = gen_random_output()
        np.testing.assert_equal(lowerCamelCase_ , lowerCamelCase_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' ,[{}] )
def lowerCAmelCase__(input_data ) -> Optional[int]:
    """NestedDataStructure must expose the wrapped value unchanged via ``.data``.

    BUG FIX: the parameter had been renamed to ``__snake_case`` while the body
    used the undefined names ``a_``/``output_data``/``input_data``; pytest
    also injects the parametrized value by name, so the parameter must be
    ``input_data``.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    '''data, expected_output''' ,[
        ({}, []),
        ([], []),
        ('''foo''', ['''foo''']),
        (['''foo''', '''bar'''], ['''foo''', '''bar''']),
        ([['''foo''', '''bar''']], ['''foo''', '''bar''']),
        ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
        ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
        ({'''a''': 1, '''b''': 2}, [1, 2]),
        ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
        ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
        ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
        ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
        ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
    ] ,)
def lowerCAmelCase__(data ,expected_output ) -> Optional[Any]:
    """NestedDataStructure.flatten must yield leaves in order for any nesting.

    BUG FIX: both parameters were named ``__snake_case`` (duplicate argument
    names are a SyntaxError) and the body used undefined ``a_``/``output``;
    names are restored from the parametrize ids and the body's references.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def lowerCAmelCase__() -> Any:
    """asdict converts (nested) dataclass instances to plain dicts and rejects
    non-dataclass inputs.

    NOTE(review): garbled — ``A`` (presumably the dataclass defined above,
    here renamed ``__A``) and ``a_`` (each object under test, and the expected
    exception type, presumably TypeError) are undefined; NameError as written.
    """
    lowerCamelCase__ = A(x=1 ,y='''foobar''' )
    lowerCamelCase__ = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(a_ ) == expected_output
    lowerCamelCase__ = {'''a''': {'''b''': A(x=10 ,y='''foo''' )}, '''c''': [A(x=20 ,y='''bar''' )]}
    lowerCamelCase__ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(a_ ) == expected_output
    with pytest.raises(a_ ):
        asdict([1, A(x=10 ,y='''foo''' )] )
def lowerCAmelCase__(__snake_case ) -> List[str]:
    """Split ``__snake_case`` on whitespace (picklable helper for pool tests).

    BUG FIX: the body referenced an undefined ``text``; it now uses the parameter.
    """
    return __snake_case.split()
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
    """Yield (timestamp, value) twice, two seconds apart — used to check that
    iflatmap_unordered delivers items as soon as they are produced.

    BUG FIX: the body referenced an undefined ``content``; it now uses the parameter.
    """
    yield (time.time(), __snake_case)
    time.sleep(2 )
    yield (time.time(), __snake_case)
def lowerCAmelCase__() -> Dict:
    """iflatmap_unordered works with both multiprocessing and pathos pools and
    yields items as soon as the workers produce them.

    NOTE(review): garbled — ``a_`` is undefined everywhere it appears (it
    stands in for the ``pool`` and for ``out``/the yielded item), and
    ``_split_text`` / ``_aseconds_generator_of_aitems_with_timing`` are the
    two helpers defined above, whose names were mangled to ``lowerCAmelCase__``.
    NameError as written; restore from upstream before running.
    """
    with Pool(2 ) as pool:
        lowerCamelCase__ = list(iflatmap_unordered(a_ ,_split_text ,kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(a_ ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        lowerCamelCase__ = list(iflatmap_unordered(a_ ,_split_text ,kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(a_ ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        lowerCamelCase__ = []
        for yield_time, content in iflatmap_unordered(
            a_ ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(a_ )
        assert out.count('''a''' ) == 2
        assert out.count('''b''' ) == 2
        assert len(a_ ) == 4
| 718
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 29
| 0
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
# BUG FIX: the module-level logger was bound to the throwaway name ``_a``;
# the rest of this script calls ``logger.info(...)``, which raised NameError.
logger = logging.get_logger("transformers.models.encodec")
# BUG FIX: the combined mappings below referenced MAPPING_QUANTIZER,
# MAPPING_ENCODER, MAPPING_ENCODER_48K, MAPPING_DECODER and MAPPING_DECODER_48K,
# but every dict had been assigned to the throwaway name ``_a`` — importing
# this script raised NameError.  Names are restored to match those references
# (the *_48K dicts are the norm-layer mappings, which only the 48 kHz model
# uses — they are excluded from MAPPING_24K below).
MAPPING_QUANTIZER = {
    '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
    '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
    '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
    '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
    '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
    '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
    '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
    '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
    '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
    '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
    '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
    '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
    '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
    '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
    '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
    '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
    '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
    '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
    '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
    '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
    '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
    '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
    '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
    '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
    '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
    '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
    '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
    '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
    '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
    '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
    '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
    '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
    '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
    '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
    '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
    '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
    '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
    '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
    '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
    '''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
    '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
    '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
    '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
    '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
    '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
    '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
    '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
    '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
    '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
    '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
    '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
    '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
    '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
    '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
    '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
    '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
    '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
    '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
    '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
    '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
    '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
    '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
    '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
    '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
    '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
    '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
    '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
    '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
    '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
    '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
    '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
    '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
    '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
    '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
    '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
    '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
    '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
    '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
# Combined key mapping for the 24 kHz (no norm layers) model.
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
# Combined key mapping for the 48 kHz model (includes the norm layers).
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
# NOTE(review): these two empty lists were also renamed to ``_a``; they are
# presumably the TOP_LEVEL_KEYS / IGNORE_KEYS lists used by the loader below —
# confirm against upstream before renaming.
_a = []
_a = []
def lowerCAmelCase__(hf_pointer ,key ,value ,full_name ,weight_type ) -> str:
    """Copy *value* into the parameter of *hf_pointer* addressed by dotted *key*.

    ``weight_type`` selects which attribute of the resolved module receives the
    tensor (e.g. ``"weight"``, ``"bias"``, LSTM gate weights); ``None`` writes
    the pointer's own ``.data``.  Raises ValueError on a shape mismatch.

    BUG FIX: all five parameters were named ``__snake_case`` — duplicate
    argument names are a SyntaxError — and every assignment target had been
    replaced by a throwaway local, so no weight was ever written.  Names are
    reconstructed from how the body uses them.
    """
    # Walk the dotted path down to the target submodule / parameter holder.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer ,attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer ,weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def lowerCAmelCase__(name ,ignore_keys ) -> Optional[Any]:
    """Return True when *name* matches any pattern in *ignore_keys*.

    Patterns: ``'prefix.*'`` matches names starting with ``'prefix.'``;
    ``'prefix.*.suffix'`` matches when both pieces occur in the name; any
    other pattern matches as a plain substring.

    BUG FIX: both parameters were named ``__snake_case`` (duplicate argument
    names are a SyntaxError) and the split result was bound to throwaway
    locals while the comparison read ``prefix``/``suffix``; names are
    reconstructed from the body's own references.
    """
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every tensor in ``orig_dict`` onto ``hf_model`` using the MAPPING tables.

    Args:
        orig_dict: state dict of the original EnCodec checkpoint.
        hf_model: target HF ``EncodecModel``.
        model_name: one of ``encodec_24khz`` / ``encodec_32khz`` / ``encodec_48khz``;
            selects which key-mapping table to use.

    Raises:
        ValueError: for an unsupported ``model_name``.
    """
    unused_weights = []

    # BUGFIX: the original used `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy (non-empty string), so the 48kHz mapping was unreachable.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'Unsupported model: {model_name}' )

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'{name} was ignored' )
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('''.*.''' )
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
                    continue

                is_used = True
                if "*" in mapped_key:
                    # Recover the layer index from the original name to fill the wildcard.
                    layer_index = name.split(key)[0].split('''.''' )[-2]
                    mapped_key = mapped_key.replace('''*''' , layer_index)

                # Order matters: the specific LSTM gate names must be matched
                # before the generic "bias"/"weight" substrings.
                if "weight_g" in name:
                    weight_type = '''weight_g'''
                elif "weight_v" in name:
                    weight_type = '''weight_v'''
                elif "weight_ih_l0" in name:
                    weight_type = '''weight_ih_l0'''
                elif "weight_hh_l0" in name:
                    weight_type = '''weight_hh_l0'''
                elif "bias_ih_l0" in name:
                    weight_type = '''bias_ih_l0'''
                elif "bias_hh_l0" in name:
                    weight_type = '''bias_hh_l0'''
                elif "weight_ih_l1" in name:
                    weight_type = '''weight_ih_l1'''
                elif "weight_hh_l1" in name:
                    weight_type = '''weight_hh_l1'''
                elif "bias_ih_l1" in name:
                    weight_type = '''bias_ih_l1'''
                elif "bias_hh_l1" in name:
                    weight_type = '''bias_hh_l1'''
                elif "bias" in name:
                    weight_type = '''bias'''
                elif "weight" in name:
                    weight_type = '''weight'''
                elif "running_mean" in name:
                    weight_type = '''running_mean'''
                elif "running_var" in name:
                    weight_type = '''running_var'''
                elif "num_batches_tracked" in name:
                    weight_type = '''num_batches_tracked'''
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original EnCodec checkpoint to the HF format.

    Args:
        model_name: which EnCodec variant to convert
            (``encodec_24khz`` / ``encodec_32khz`` / ``encodec_48khz``).
        checkpoint_path: path to the original checkpoint file.
        pytorch_dump_folder_path: output directory for the converted model
            and its feature extractor.
        config_path: optional path to an existing HF config; otherwise the
            config is built from defaults plus the per-variant overrides below.
        repo_id: optional hub repo to push the converted artifacts to.

    Raises:
        ValueError: for an unknown ``model_name``.
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2  # 48kHz model is stereo
        config.use_causal_conv = False
        config.norm_type = '''time_group_norm'''
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'Unknown model name: {model_name}' )

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['''best_state''']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('''Pushing to the hub...''' )
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    # BUGFIX: the parser/args objects were both assigned to `_a` while the code
    # below referenced `parser` and `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 719
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Generate a README.md model card for one allenai wmt16 FSMT model.

    Args:
        model_card_dir: directory (``pathlib.Path``) to create and write README.md into.
        src_lang: source language code (e.g. ``"en"``).
        tgt_lang: target language code (e.g. ``"de"``).
        model_name: model identifier, used for the hub link and the BLEU table.
    """
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''wmt16-en-de-dist-12-1''': [28.3, 27.52],
        '''wmt16-en-de-dist-6-1''': [27.4, 27.11],
        '''wmt16-en-de-12-1''': [26.9, 25.75],
    }
    pair = F'{src_lang}-{tgt_lang}'
    readme = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''' )
    print(F'Generating {path}' )
    with open(path, '''w''', encoding='''utf-8''' ) as f:
        f.write(readme)
# make sure we are under the root of the project
# BUGFIX: all three path variables were obfuscated to `_a`, while the call
# below referenced `model_card_dir` (NameError). Restore distinct names.
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

# Generate one card per allenai wmt16 variant.
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 29
| 0
|
import argparse
from collections import defaultdict
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ = F'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(__snake_case ,'''r''' ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = F'class {class_name}('
lowerCamelCase__ = F'{4 * " "}def {test_name}('
lowerCamelCase__ = F'{8 * " "}{correct_line.split()[0]}'
lowerCamelCase__ = F'{16 * " "}{correct_line.split()[0]}'
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = []
for line in lines:
if line.startswith(__snake_case ):
lowerCamelCase__ = True
elif in_class and line.startswith(__snake_case ):
lowerCamelCase__ = True
elif in_class and in_func and (line.startswith(__snake_case ) or line.startswith(__snake_case )):
lowerCamelCase__ = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowerCamelCase__ = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowerCamelCase__ = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'{spaces * " "}{correct_line}' )
lowerCamelCase__ = lowerCamelCase__ = lowerCamelCase__ = lowerCamelCase__ = False
else:
new_lines.append(__snake_case )
with open(__snake_case ,'''w''' ) as f:
for line in new_lines:
f.write(__snake_case )
def main(correct, fail=None):
    """Apply expected-value fixes listed in ``correct`` to the test files they name.

    Args:
        correct: path to a file of ``file;class;test;correct_line`` records.
        fail: optional path to a list of failing tests (``file::class::test``);
            when given, only records matching a listed failure are applied.
    """
    if fail is not None:
        with open(fail, '''r''' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, '''r''' ) as f:
        correct_lines = f.readlines()

    # BUGFIX: was `defaultdict(__snake_case)` — the counter must default to int.
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(''';''' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # CLI entry point.
    # BUGFIX: parser/args were both assigned to `_a` while `parser`/`args`
    # were referenced below (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 720
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias for ``SegformerImageProcessor``.

    Kept for backward compatibility only; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.