code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(1_00, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }")
| 0 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 657 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__snake_case = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def _A ( _lowercase , _lowercase ) -> str:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
return (preds == labels).mean()
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
__UpperCamelCase = simple_accuracy(_lowercase , _lowercase )
__UpperCamelCase = fa_score(y_true=_lowercase , y_pred=_lowercase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _A ( _lowercase , _lowercase ) -> List[str]:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
__UpperCamelCase = pearsonr(_lowercase , _lowercase )[0]
__UpperCamelCase = spearmanr(_lowercase , _lowercase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
assert len(_lowercase ) == len(_lowercase ), f'''Predictions and labels have mismatched lengths {len(_lowercase )} and {len(_lowercase )}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(_lowercase , _lowercase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "mrpc":
return acc_and_fa(_lowercase , _lowercase )
elif task_name == "sts-b":
return pearson_and_spearman(_lowercase , _lowercase )
elif task_name == "qqp":
return acc_and_fa(_lowercase , _lowercase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "rte":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "hans":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(_lowercase )
def _A ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , 'sklearn' )
if len(_lowercase ) != len(_lowercase ):
raise ValueError(f'''Predictions and labels have mismatched lengths {len(_lowercase )} and {len(_lowercase )}''' )
if task_name == "xnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(_lowercase )
| 1 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Union[str, Any] = ""
a__ : Optional[int] = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : List[str] , __lowerCAmelCase : Optional[DatasetInfo] = None , __lowerCAmelCase : Optional[str] = None , **__lowerCAmelCase : str , ) -> Tuple:
super().__init__(self , **__lowerCAmelCase )
_A = repo_info
_A = token
_A = None
def snake_case_ ( self : Dict ) -> str:
if self.dir_cache is None:
_A = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_A = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def snake_case_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str = "rb" , **__lowerCAmelCase : Tuple , ) -> Dict:
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
_A = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : List[str] , **__lowerCAmelCase : int ) -> Optional[Any]:
self._get_dirs()
_A = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : int=False , **__lowerCAmelCase : Any ) -> Union[str, Any]:
self._get_dirs()
_A = PurePosixPath(path.strip('''/''' ) )
_A = {}
for p, f in self.dir_cache.items():
_A = PurePosixPath(p.strip('''/''' ) )
_A = p.parent
if root == path:
_A = f
_A = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 2 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = ["""audio_values""", """audio_mask"""]
def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=44100 , A_=86 , A_=2048 , A_=0.0 , **A_ , )-> Dict:
'''simple docstring'''
super().__init__(
feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
UpperCamelCase = spectrogram_length
UpperCamelCase = num_channels
UpperCamelCase = patch_size
UpperCamelCase = feature_size // self.patch_size[1]
UpperCamelCase = n_fft
UpperCamelCase = sampling_rate // hop_length_to_sampling_rate
UpperCamelCase = sampling_rate
UpperCamelCase = padding_value
UpperCamelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=A_ , norm='slaney' , mel_scale='slaney' , ).T
def UpperCAmelCase_ ( self , A_ )-> np.ndarray:
'''simple docstring'''
UpperCamelCase = spectrogram(
A_ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
UpperCamelCase = log_spec[:, :-1]
UpperCamelCase = log_spec - 20.0
UpperCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , )-> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
UpperCamelCase = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
UpperCamelCase = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCamelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A_ ):
UpperCamelCase = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCamelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCamelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCamelCase = np.array(A_ ).astype(np.floataa )
# convert into correct format for padding
UpperCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCamelCase = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCamelCase = padded_audio_features * self.padding_value
for i in range(len(A_ ) ):
UpperCamelCase = audio_features[i]
UpperCamelCase = feature
# return as BatchFeature
if return_attention_mask:
UpperCamelCase = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
UpperCamelCase = {'audio_values': padded_audio_features}
UpperCamelCase = BatchFeature(data=A_ , tensor_type=A_ )
return encoded_inputs
| 3 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 657 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : str = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
'''simple docstring'''
import os
def solution():
    _lowerCAmelCase = os.path.join(os.path.dirname(os.path.realpath(__file__ ) ) , """num.txt""" )
    with open(_lowerCAmelCase ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
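# Worked example of the final slice (hypothetical numbers, not the real num.txt
# data): str(sum([123456789012, 987654321098]))[:10] == "1111111110", i.e. only
# the ten most significant digits of the total are kept.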
if __name__ == "__main__":
print(solution())
| 5 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
    def _a ( self ) -> Any:
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
    def _a ( self ) -> List[Any]:
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def _a ( self ) -> List[Any]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
    def _a ( self ) -> List[Any]:
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors="pt" )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(result , expected_src_tokens )
    def _a ( self ) -> str:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 0 |
from typing import Any
def SCREAMING_SNAKE_CASE__ ( input_list: list ):
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
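# Usage sketch: the mode of [2, 2, 3] is [2]; with a tie such as [1, 1, 2, 2]
# every most-frequent value is returned in sorted order, i.e. [1, 2].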
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
"""simple docstring"""
def __lowerCamelCase ( number ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
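# Example: 25 ** 2 == 625 ends in "25", so the check returns True for 25,
# while 7 ** 2 == 49 does not end in "7", so it returns False for 7.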
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement ( mat_a : np.ndarray , mat_b : np.ndarray , mat_c : np.ndarray , pseudo_inv : np.ndarray | None = None , ) -> np.ndarray:
    '''simple docstring'''
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        _A = (
            'Expected the same number of rows for A and B. '
            F'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(_A )
    if shape_b[1] != shape_c[1]:
        _A = (
            'Expected the same number of columns for B and C. '
            F'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(_A )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                'Input matrix A is not invertible. Cannot compute Schur complement.' )
    return mat_c - mat_b.T @ a_inv @ mat_b
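# For the block matrix M = [[A, B], [B.T, C]] the Schur complement
# S = C - B.T @ A^-1 @ B satisfies det(M) = det(A) * det(S), which is exactly
# the identity the first unit test below verifies numerically.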
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def lowerCAmelCase_ ( self : Optional[int] ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def lowerCAmelCase_ ( self : str ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
    def lowerCAmelCase_ ( self : List[Any] ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 7 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _a ( self , **kwargs ) -> List[str]:
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
    def _a ( self , a_ ) -> Optional[Any]:
        image = load_image(a_ )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs
    def _a ( self , model_inputs ) -> Optional[Any]:
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def _a ( self , model_outputs , threshold=0.9 ) -> int:
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
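            # e.g. a normalized bbox [250, 250, 500, 500] on an 800x600 image
            # maps to pixel coordinates [200.0, 150.0, 400.0, 300.0].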
            scores , classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation
    def _a ( self , box ) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 657 | 0 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter (logging.LoggerAdapter ):
    @staticmethod
    def _should_log(main_process_only):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self , level , msg , *args , **kwargs):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.')
        main_process_only = kwargs.pop('main_process_only' , True)
        in_order = kwargs.pop('in_order' , False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg , kwargs = self.process(msg , kwargs)
                self.logger.log(level , msg , *args , **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs)
                        self.logger.log(level , msg , *args , **kwargs)
                    state.wait_for_everyone()
def get_logger ( name : str , log_level : str = None ):
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} ) | 8 |
"""simple docstring"""
def merge_sort ( collection ):
    """simple docstring"""
    def merge(left , right ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
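# Usage sketch: merge_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]; each level
# of the recursion halves the list, giving the usual O(n log n) running time.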
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[int] = "bloom"
A__ : Union[str, Any] = ["past_key_values"]
A__ : int = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Dict , _snake_case : Tuple=25_08_80 , _snake_case : Dict=64 , _snake_case : Optional[int]=2 , _snake_case : int=8 , _snake_case : Optional[int]=1E-5 , _snake_case : List[Any]=0.02 , _snake_case : Optional[Any]=True , _snake_case : Union[str, Any]=1 , _snake_case : List[Any]=2 , _snake_case : Optional[int]=False , _snake_case : Union[str, Any]=0.0 , _snake_case : Union[str, Any]=0.0 , _snake_case : int=1 , _snake_case : List[Any]=False , **_snake_case : str , ):
"""simple docstring"""
A__ = vocab_size
# Backward compatibility with n_embed kwarg
A__ = kwargs.pop('n_embed' , _snake_case )
A__ = hidden_size if n_embed is None else n_embed
A__ = n_layer
A__ = n_head
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = use_cache
A__ = pretraining_tp
A__ = apply_residual_connection_post_layernorm
A__ = hidden_dropout
A__ = attention_dropout
A__ = bos_token_id
A__ = eos_token_id
A__ = slow_but_exact
super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[Any] = version.parse("1.12" )
def __init__( self : str , _snake_case : PretrainedConfig , _snake_case : str = "default" , _snake_case : List[PatchingSpec] = None , _snake_case : bool = False , ):
"""simple docstring"""
super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case )
if not getattr(self._config , 'pad_token_id' , _snake_case ):
# TODO: how to do that better?
A__ = 0
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_snake_case , direction='inputs' , inverted_values_shape=_snake_case )
A__ = {0: 'batch', 1: 'past_sequence + sequence'}
else:
A__ = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return self._config.n_layer
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return self._config.n_head
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-3
def _a ( self : Dict , _snake_case : "PreTrainedTokenizer" , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional["TensorType"] = None , ):
"""simple docstring"""
A__ = super(_snake_case , self ).generate_dummy_inputs(
_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case )
# We need to order the input in the way they appears in the forward()
A__ = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A__ , A__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A__ = seqlen + 2
A__ = self._config.hidden_size // self.num_attention_heads
A__ = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
A__ = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
A__ = [
(torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(self.num_layers )
]
A__ = common_inputs['attention_mask']
if self.use_past:
A__ = ordered_inputs['attention_mask'].dtype
A__ = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 )
return ordered_inputs
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
return 13
| 9 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _a ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "unispeech-sat"
def __init__( self : List[Any] , _A : Dict=32 , _A : int=768 , _A : str=12 , _A : str=12 , _A : Any=3072 , _A : List[str]="gelu" , _A : Any=0.1 , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=0.0 , _A : List[str]=0.0 , _A : Optional[Any]=0.1 , _A : str=0.1 , _A : List[str]=0.02 , _A : Optional[int]=1e-5 , _A : Dict="group" , _A : str="gelu" , _A : List[str]=(512, 512, 512, 512, 512, 512, 512) , _A : Any=(5, 2, 2, 2, 2, 2, 2) , _A : Dict=(10, 3, 3, 3, 3, 2, 2) , _A : Union[str, Any]=False , _A : str=128 , _A : Tuple=16 , _A : Optional[int]=False , _A : Dict=True , _A : Optional[Any]=0.05 , _A : Any=10 , _A : str=2 , _A : Dict=0.0 , _A : List[str]=10 , _A : Union[str, Any]=0 , _A : List[str]=320 , _A : List[Any]=2 , _A : Optional[Any]=0.1 , _A : Optional[Any]=100 , _A : List[str]=256 , _A : Any=256 , _A : List[Any]=0.1 , _A : Dict="mean" , _A : Dict=False , _A : List[str]=False , _A : List[Any]=256 , _A : Any=(512, 512, 512, 512, 1500) , _A : Any=(5, 3, 3, 1, 1) , _A : Dict=(1, 2, 3, 1, 1) , _A : str=512 , _A : Dict=0 , _A : List[str]=1 , _A : Tuple=2 , _A : Optional[Any]=504 , **_A : int , ):
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = conv_bias
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim )
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = vocab_size
_UpperCamelCase = num_clusters
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
_UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = feat_quantizer_dropout
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = xvector_output_dim
@property
def UpperCamelCase_ ( self : List[str] ):
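        # With the default conv strides (5, 2, 2, 2, 2, 2, 2) this product is
        # 5 * 2 ** 6 == 320, i.e. one feature frame per 320 input audio samples.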
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 10 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule ( scheduler , num_steps=10 ):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule ( scheduler , num_steps=10 ):
    """simple docstring"""
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ) -> Optional[int]:
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def _a ( self ) -> str:
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def _a ( self ) -> Union[str, Any]:
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ) -> Union[str, Any]:
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs , expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper :
    def __init__( self , fn ) -> Union[str, Any]:
        self.fn = fn
    def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
        return self.fn(*a_ , **a_ )
    @classmethod
    def wrap_scheduler( cls , scheduler ) -> Dict:
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 657 | 0 |
'''simple docstring'''
def solution (__A = 4_000_000):
    """simple docstring"""
    even_fibs = []
    a , b = 0, 1
    while b <= __A:
        if b % 2 == 0:
            even_fibs.append(b)
        a , b = b, a + b
    return sum(even_fibs)
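# Sanity check: every third Fibonacci number is even (2, 8, 34, ...), so
# solution(100) == 2 + 8 + 34 == 44.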
if __name__ == "__main__":
print(F"""{solution() = }""")
| 11 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 657 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ):
'''simple docstring'''
lowercase__ : str = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : Union[str, Any] = is_training
lowercase__ : Any = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[int] = vocab_size
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Any = rotary_dim
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Tuple = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : Optional[int] = initializer_range
lowercase__ : Optional[int] = None
lowercase__ : str = vocab_size - 1
lowercase__ : Any = vocab_size - 1
lowercase__ : Dict = vocab_size - 1
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ : Any = None
if self.use_input_mask:
lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ : List[Any] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs
lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = 20
lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase__ : Tuple = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
lowercase__ : List[str] = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase__ : str = model(
input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Union[str, Any] = 20
lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
lowercase__ : Any = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase__ : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
@require_flax
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = FlaxGPTJModelTester(self)
def lowercase__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@tooslow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""")
lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""")
lowercase__ : Optional[Any] = False
lowercase__ : List[str] = model.config.eos_token_id
lowercase__ : List[Any] = jax.jit(model.generate)
lowercase__ : Tuple = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences
lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@is_pt_flax_cross_test
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape
lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : str = 0
lowercase__ : List[Any] = 1
lowercase__ : Dict = 0
lowercase__ : Any = 1
lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval()
                lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.float32)
lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = fx_state
with torch.no_grad():
lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple()
lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@is_pt_flax_cross_test
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval()
                lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.float32)
lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params)
lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape
lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Tuple = 0
lowercase__ : int = 1
lowercase__ : str = 0
lowercase__ : str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple()
lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_)
with torch.no_grad():
lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@tooslow
def lowercase__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""")
lowercase__ : int = model(np.ones((1, 1)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
| 12 |
"""simple docstring"""
def equation ( x ):
    """simple docstring"""
    return 10 - x * x
def bisection ( a , b ):
    """simple docstring"""
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
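# Example: equation(0) == 10 > 0 and equation(6) == -26 < 0, so bisection(0, 6)
# narrows in on the positive root of 10 - x * x, roughly 3.16.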
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 | 0 |
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate ( days: int , absent: int , late: int ) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution ( days: int = 30 ) -> int:
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 13 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 | 0 |
from __future__ import annotations
import math
def __UpperCAmelCase ( __a : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__a ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
a__ = [num for num in range(3, 100001, 2) if not is_prime(num)]
def __UpperCAmelCase ( __a : int ) -> list[int]:
"""simple docstring"""
if not isinstance(__a ,__a ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
_a : Dict = []
for num in range(len(__a ) ):
_a : Optional[Any] = 0
while 2 * i * i <= odd_composites[num]:
_a : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(__a ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__a ) == n:
return list_nums
return []
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 14 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def UpperCamelCase ( __magic_name__ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
"""simple docstring"""
lowercase__ = []
if isinstance(__magic_name__ , __magic_name__ ):
for v in tree.values():
shapes.extend(_fetch_dims(__magic_name__ ) )
elif isinstance(__magic_name__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(__magic_name__ ) )
elif isinstance(__magic_name__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def UpperCamelCase ( __magic_name__ : int , __magic_name__ : Tuple[int, ...] ) -> Tuple[int, ...]:
"""simple docstring"""
lowercase__ = []
for d in reversed(__magic_name__ ):
idx.append(flat_idx % d )
lowercase__ = flat_idx // d
return tuple(reversed(__magic_name__ ) )
@torch.jit.ignore
def UpperCamelCase ( __magic_name__ : Sequence[int] , __magic_name__ : Sequence[int] , __magic_name__ : Sequence[int] , __magic_name__ : Optional[Sequence[bool]] = None , __magic_name__ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
"""simple docstring"""
def reduce_edge_list(__magic_name__ : List[bool] ) -> None:
lowercase__ = True
for i in range(len(__magic_name__ ) ):
lowercase__ = -1 * (i + 1)
l[reversed_idx] &= tally
lowercase__ = l[reversed_idx]
if start_edges is None:
lowercase__ = [s == 0 for s in start]
reduce_edge_list(__magic_name__ )
if end_edges is None:
lowercase__ = [e == (d - 1) for e, d in zip(__magic_name__ , __magic_name__ )]
reduce_edge_list(__magic_name__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__magic_name__ ) == 0:
return [()]
elif len(__magic_name__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
lowercase__ = []
lowercase__ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__magic_name__ , __magic_name__ ):
if s == e:
path_list.append(slice(__magic_name__ , s + 1 ) )
else:
break
lowercase__ = tuple(__magic_name__ )
lowercase__ = len(__magic_name__ )
# start == end, and we're done
if divergence_idx == len(__magic_name__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ = start[divergence_idx]
return tuple(
path + (slice(__magic_name__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ = end[divergence_idx]
return tuple(
path + (slice(__magic_name__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowercase__ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def UpperCamelCase ( __magic_name__ : torch.Tensor , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> torch.Tensor:
"""simple docstring"""
lowercase__ = t.shape[:no_batch_dims]
lowercase__ = list(_flat_idx_to_idx(__magic_name__ , __magic_name__ ) )
# _get_minimal_slice_set is inclusive
lowercase__ = list(_flat_idx_to_idx(flat_end - 1 , __magic_name__ ) )
# Get an ordered list of slices to perform
lowercase__ = _get_minimal_slice_set(
__magic_name__ , __magic_name__ , __magic_name__ , )
lowercase__ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def UpperCamelCase ( __magic_name__ : Callable , __magic_name__ : Dict[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : bool = False , __magic_name__ : Any = None , __magic_name__ : bool = False , ) -> Any:
"""simple docstring"""
if not (len(__magic_name__ ) > 0):
raise ValueError("""Must provide at least one input""" )
lowercase__ = [shape[:no_batch_dims] for shape in _fetch_dims(__magic_name__ )]
lowercase__ = tuple([max(__magic_name__ ) for s in zip(*__magic_name__ )] )
def _prep_inputs(__magic_name__ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
lowercase__ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
lowercase__ = tensor_tree_map(_prep_inputs , __magic_name__ )
lowercase__ = None
if _out is not None:
lowercase__ = tensor_tree_map(lambda __magic_name__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
lowercase__ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowercase__ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(__magic_name__ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowercase__ = 0
lowercase__ = prepped_outputs
for _ in range(__magic_name__ ):
# Chunk the input
if not low_mem:
lowercase__ = _select_chunk
else:
lowercase__ = partial(
_chunk_slice , flat_start=__magic_name__ , flat_end=min(__magic_name__ , i + chunk_size ) , no_batch_dims=len(__magic_name__ ) , )
lowercase__ = tensor_tree_map(__magic_name__ , __magic_name__ )
# Run the layer on the chunk
lowercase__ = layer(**__magic_name__ )
# Allocate space for the output
if out is None:
lowercase__ = tensor_tree_map(lambda __magic_name__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , __magic_name__ )
# Put the chunk in its pre-allocated space
if isinstance(__magic_name__ , __magic_name__ ):
def assign(__magic_name__ : dict , __magic_name__ : dict ) -> None:
for k, v in da.items():
if isinstance(__magic_name__ , __magic_name__ ):
assign(__magic_name__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowercase__ = da[k]
assign(__magic_name__ , __magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
for xa, xa in zip(__magic_name__ , __magic_name__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowercase__ = xa
elif isinstance(__magic_name__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowercase__ = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
lowercase__ = tensor_tree_map(lambda __magic_name__ : t.view(orig_batch_dims + t.shape[1:] ) , __magic_name__ )
return out
class A :
'''simple docstring'''
def __init__(self : List[str] , _UpperCAmelCase : int = 512 , ) -> str:
"""simple docstring"""
lowercase__ = max_chunk_size
lowercase__ = None
lowercase__ = None
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Callable , _UpperCAmelCase : tuple , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
lowercase__ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
lowercase__ = [c for c in candidates if c > min_chunk_size]
lowercase__ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_UpperCAmelCase : int ) -> bool:
try:
with torch.no_grad():
fn(*_UpperCAmelCase , chunk_size=_UpperCAmelCase )
return True
except RuntimeError:
return False
lowercase__ = 0
lowercase__ = len(_UpperCAmelCase ) - 1
while i > min_viable_chunk_size_index:
lowercase__ = test_chunk_size(candidates[i] )
if not viable:
lowercase__ = (min_viable_chunk_size_index + i) // 2
else:
lowercase__ = i
lowercase__ = (i + len(_UpperCAmelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : Iterable , _UpperCAmelCase : Iterable ) -> bool:
"""simple docstring"""
lowercase__ = True
for aa, aa in zip(_UpperCAmelCase , _UpperCAmelCase ):
assert type(_UpperCAmelCase ) == type(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = [v for _, v in sorted(aa.items() , key=lambda _UpperCAmelCase : x[0] )]
lowercase__ = [v for _, v in sorted(aa.items() , key=lambda _UpperCAmelCase : x[0] )]
consistent &= self._compare_arg_caches(_UpperCAmelCase , _UpperCAmelCase )
else:
consistent &= aa == aa
return consistent
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Callable , _UpperCAmelCase : tuple , _UpperCAmelCase : int , ) -> int:
"""simple docstring"""
lowercase__ = True
lowercase__ = tree_map(lambda _UpperCAmelCase : a.shape if isinstance(_UpperCAmelCase , torch.Tensor ) else a , _UpperCAmelCase , _UpperCAmelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_UpperCAmelCase )
lowercase__ = self._compare_arg_caches(self.cached_arg_data , _UpperCAmelCase )
else:
# Otherwise, we can reuse the precomputed value
lowercase__ = False
if not consistent:
lowercase__ = self._determine_favorable_chunk_size(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
lowercase__ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 15 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 0 |
from __future__ import annotations
import math
def __a ( A__ : int ):
if num <= 0:
SCREAMING_SNAKE_CASE = F"{num}: Invalid input, please enter a positive integer."
raise ValueError(A__ )
SCREAMING_SNAKE_CASE = [True] * (num + 1)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = int(math.sqrt(A__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(A__ )
# Set multiples of start be False
for i in range(start * start , num + 1 , A__ ):
if sieve[i] is True:
SCREAMING_SNAKE_CASE = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(A__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip()))) | 16 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> str:
return " ".join(
"""""".join(word[::-1] ) if len(a__ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 17 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 0 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_SCREAMING_SNAKE_CASE = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_SCREAMING_SNAKE_CASE = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def _snake_case ( self ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=1 , _lowerCAmelCase="binary" , _lowerCAmelCase=None ) -> Optional[Any]:
_lowerCAmelCase = fa_score(
_lowerCAmelCase , _lowerCAmelCase , labels=_lowerCAmelCase , pos_label=_lowerCAmelCase , average=_lowerCAmelCase , sample_weight=_lowerCAmelCase )
return {"f1": float(_lowerCAmelCase ) if score.size == 1 else score}
| 18 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( __snake_case = 1_00_00_00 ) -> int:
"""simple docstring"""
_UpperCamelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2, limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i, limit + 1, __snake_case ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 19 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowercase_ :
snake_case =None
def __UpperCamelCase ( self) -> Optional[int]:
a__ =self.feature_extraction_class(**self.feat_extract_dict)
a__ =json.loads(feat_extract.to_json_string())
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
a__ =os.path.join(lowercase_ , 'feat_extract.json')
feat_extract_first.to_json_file(lowercase_)
a__ =self.feature_extraction_class.from_json_file(lowercase_)
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict())
def __UpperCamelCase ( self) -> Tuple:
a__ =self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
a__ =feat_extract_first.save_pretrained(lowercase_)[0]
check_json_file_has_correct_format(lowercase_)
a__ =self.feature_extraction_class.from_pretrained(lowercase_)
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict())
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.feature_extraction_class()
self.assertIsNotNone(lowercase_)
| 20 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Dict = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 0 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
_snake_case : str = float('nan')
class A :
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a = sys.stdout
_a = open(lowerCAmelCase_ , '''a''' )
def __getattr__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return getattr(self.stdout , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[int] ) -> str:
"""simple docstring"""
self.stdout.write(lowerCAmelCase_ )
# strip tqdm codes
self.file.write(re.sub(R'''^.*\r''' , '''''' , lowerCAmelCase_ , 0 , re.M ) )
def snake_case_ (UpperCamelCase : List[str]=80 , UpperCamelCase : int=False ):
'''simple docstring'''
_a = []
# deal with critical env vars
_a = ['''CUDA_VISIBLE_DEVICES''']
for key in env_keys:
_a = os.environ.get(UpperCamelCase , UpperCamelCase )
if val is not None:
cmd.append(f'{key}={val}' )
# python executable (not always needed if the script is executable)
_a = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
cmd.append(UpperCamelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_a = []
_a = ''''''
while len(UpperCamelCase ) > 0:
current_line += f'{cmd.pop(0 )} '
if len(UpperCamelCase ) == 0 or len(UpperCamelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase )
_a = ''''''
return "\\\n".join(UpperCamelCase )
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
# remove --output_dir if any and set our own
_a = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
args.base_cmd += f' --output_dir {output_dir}'
# ensure we have --overwrite_output_dir
_a = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
_a = subprocess.run(UpperCamelCase , capture_output=UpperCamelCase , text=UpperCamelCase )
if verbose:
print('''STDOUT''' , result.stdout )
print('''STDERR''' , result.stderr )
# save the streams
_a = variation.replace(''' ''' , '''-''' )
with open(Path(UpperCamelCase ) / f'log.{prefix}.stdout.txt' , '''w''' ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase ) / f'log.{prefix}.stderr.txt' , '''w''' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('''failed''' )
return {target_metric_key: nan}
with io.open(f'{output_dir}/all_results.json' , '''r''' , encoding='''utf-8''' ) as f:
_a = json.load(UpperCamelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : List[str] , ):
'''simple docstring'''
_a = []
_a = []
_a = f'{id}: {variation:<{longest_variation_len}}'
_a = f'{preamble}: '
_a = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase ) , desc=UpperCamelCase , leave=UpperCamelCase ):
_a = process_run_single(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_a = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase ):
metrics.append(UpperCamelCase )
results.append(UpperCamelCase )
outcome += "✓"
else:
outcome += "✘"
_a = f'\33[2K\r{outcome}'
if len(UpperCamelCase ) > 0:
_a = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_a = round(mean_metrics[target_metric_key] , 2 )
_a = f'{outcome} {mean_target}'
if len(UpperCamelCase ) > 1:
results_str += f' {tuple(round(UpperCamelCase , 2 ) for x in results )}'
print(UpperCamelCase )
_a = variation
return mean_metrics
else:
print(UpperCamelCase )
return {variation_key: variation, target_metric_key: nan}
def snake_case_ ():
'''simple docstring'''
_a = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : str ):
'''simple docstring'''
_a = pd.DataFrame(UpperCamelCase )
_a = '''variation'''
_a = '''diff_%'''
_a = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_a = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCamelCase ):
# as a fallback, use the minimal value as the sentinel
_a = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCamelCase ):
_a = df.apply(
lambda UpperCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='''columns''' , )
# re-order columns
_a = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_a = df.reindex(UpperCamelCase , axis='''columns''' ) # reorder cols
# capitalize
_a = df.rename(str.capitalize , axis='''columns''' )
# make the cols as narrow as possible
_a = df.rename(lambda UpperCamelCase : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
_a = df.rename(lambda UpperCamelCase : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
_a = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase , floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase , floatfmt='''.2f''' )]
print('''\n\n'''.join(UpperCamelCase ) )
def snake_case_ ():
'''simple docstring'''
_a = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=UpperCamelCase , type=UpperCamelCase , nargs='''+''' , required=UpperCamelCase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=UpperCamelCase , type=UpperCamelCase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=UpperCamelCase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=UpperCamelCase , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=UpperCamelCase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=UpperCamelCase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
_a = parser.parse_args()
_a = args.output_dir
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
_a = get_base_command(UpperCamelCase , UpperCamelCase )
# split each dimension into its --foo variations
_a = [list(map(str.strip , re.split(R'''\|''' , UpperCamelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_a = list(map(str.strip , map(''' '''.join , itertools.product(*UpperCamelCase ) ) ) )
_a = max(len(UpperCamelCase ) for x in variations )
# split wanted keys
_a = args.report_metric_keys.split()
# capture prints into a log file for convenience
_a = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
print(f'and this script\'s output is also piped into {report_fn}' )
_a = Tee(UpperCamelCase )
print(f'\n*** Running {len(UpperCamelCase )} benchmarks:' )
print(f'Base command: {" ".join(UpperCamelCase )}' )
_a = '''variation'''
_a = []
for id, variation in enumerate(tqdm(UpperCamelCase , desc='''Total completion: ''' , leave=UpperCamelCase ) ):
_a = base_cmd + variation.split()
results.append(
process_run(
id + 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , args.target_metric_key , UpperCamelCase , args.repeat_times , UpperCamelCase , args.verbose , ) )
process_results(UpperCamelCase , args.target_metric_key , UpperCamelCase , args.base_variation , UpperCamelCase )
if __name__ == "__main__":
main()
| 22 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img ( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
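# Hedged note on the (1, 199, 768) shape asserted above, assuming the default
# layoutlmv3-base preprocessing (224x224 images, 16x16 patches): the visual
# stream contributes (224 // 16) ** 2 + 1 = 197 embeddings (196 patches plus
# one CLS token), and the two text tokens in `input_ids` bring it to 199.
_num_patches = (224 // 16) ** 2
assert _num_patches + 1 + 2 == 199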
| 657 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read (bpayload , sampling_rate):
    ar = f"""{sampling_rate}"""
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
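# Hedged sketch of the byte -> float32 reinterpretation ffmpeg_read relies on:
# the '-f f32le' flag makes ffmpeg emit raw little-endian float32 PCM, so the
# waveform can be recovered with a zero-copy np.frombuffer call.
_pcm = np.array([0.0, 0.5, -0.5], dtype=np.float32).tobytes()
assert np.frombuffer(_pcm, dtype=np.float32).tolist() == [0.0, 0.5, -0.5]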
def _snake_case (__lowercase , __lowercase , __lowercase = "f32le" , ):
UpperCamelCase_ = f"""{sampling_rate}"""
UpperCamelCase_ = '1'
if format_for_conversion == "s16le":
UpperCamelCase_ = 2
elif format_for_conversion == "f32le":
UpperCamelCase_ = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")
UpperCamelCase_ = platform.system()
if system == "Linux":
UpperCamelCase_ = 'alsa'
UpperCamelCase_ = 'default'
elif system == "Darwin":
UpperCamelCase_ = 'avfoundation'
UpperCamelCase_ = ':0'
elif system == "Windows":
UpperCamelCase_ = 'dshow'
UpperCamelCase_ = 'default'
UpperCamelCase_ = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
UpperCamelCase_ = int(round(sampling_rate * chunk_length_s)) * size_of_sample
UpperCamelCase_ = _ffmpeg_stream(__lowercase , __lowercase)
for item in iterator:
yield item
def _snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = "f32le" , ):
if stream_chunk_s is not None:
UpperCamelCase_ = stream_chunk_s
else:
UpperCamelCase_ = chunk_length_s
UpperCamelCase_ = ffmpeg_microphone(__lowercase , __lowercase , format_for_conversion=__lowercase)
if format_for_conversion == "s16le":
UpperCamelCase_ = np.intaa
UpperCamelCase_ = 2
elif format_for_conversion == "f32le":
UpperCamelCase_ = np.floataa
UpperCamelCase_ = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")
if stride_length_s is None:
UpperCamelCase_ = chunk_length_s / 6
UpperCamelCase_ = int(round(sampling_rate * chunk_length_s)) * size_of_sample
if isinstance(__lowercase , (int, float)):
UpperCamelCase_ = [stride_length_s, stride_length_s]
UpperCamelCase_ = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
UpperCamelCase_ = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
UpperCamelCase_ = datetime.datetime.now()
UpperCamelCase_ = datetime.timedelta(seconds=__lowercase)
for item in chunk_bytes_iter(__lowercase , __lowercase , stride=(stride_left, stride_right) , stream=__lowercase):
# Put everything back in numpy scale
UpperCamelCase_ = np.frombuffer(item['raw'] , dtype=__lowercase)
UpperCamelCase_ = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
UpperCamelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter (iterator , chunk_len , stride , stream = False):
    acc = b''
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
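# Hedged worked example for the striding chunker above: with chunk_len=6 and
# stride=(2, 2), consecutive chunks overlap by the stride widths, only the
# first chunk has a left stride of 0, and the tail is flushed with a right
# stride of 0. Purely illustrative values.
_chunks = list(chunk_bytes_iter(iter([b"abcdefghij"]) , 6 , stride=(2, 2)))
assert _chunks[0] == {"raw": b"abcdef", "stride": (0, 2)}
assert _chunks[-1] == {"raw": b"ghij", "stride": (2, 0)}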
def _ffmpeg_stream (ffmpeg_command , buflen):
    bufsize = 2**24 # 16MB
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
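# Hedged illustration of the bounded-read loop in _ffmpeg_stream above:
# reading a pipe-like stream in fixed-size chunks until EOF (empty bytes)
# keeps memory bounded. In-memory stand-in for the real ffmpeg stdout pipe.
import io
_stream = io.BytesIO(b"x" * 10)
_demo_chunks = []
while True:
    _raw = _stream.read(4)
    if _raw == b"":
        break
    _demo_chunks.append(_raw)
assert _demo_chunks == [b"xxxx", b"xxxx", b"xx"]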
| 23 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
        # If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # predictions than there are unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
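# Hedged illustration of the float16 -> float32 cast-back exercised in the
# fp16 pipeline test above: half-precision logits are upcast before softmax
# so the reported scores come back as plain float32 values. Torch-only sketch.
if is_torch_available():
    import torch
    _half_logits = torch.randn(5, dtype=torch.float16)
    _scores = _half_logits.float().softmax(dim=-1)
    assert _scores.dtype == torch.float32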
| 657 | 0 |
'''simple docstring'''
def permute (nums : list[int] )-> list[list[int]]:
    '''simple docstring'''
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permute_backtrack (nums : list[int] )-> list[list[int]]:
    '''simple docstring'''
    def backtrack(start : int ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[i] , nums[start] = nums[start] , nums[i]
                backtrack(start + 1 )
                nums[i] , nums[start] = nums[start] , nums[i] # backtrack
    output = []
    backtrack(0 )
    return output
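# Hedged doctest-style check for the two routines above: both should return
# every ordering of a 3-element list (possibly in a different order).
assert sorted(permute([1, 2, 3])) == sorted(permute_backtrack([1, 2, 3])) == [
    [1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1],
]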
if __name__ == "__main__":
import doctest
    # print the permutations produced by the backtracking variant
    res = permute_backtrack([1, 2, 3])
    print(res)
doctest.testmod()
| 24 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
        _UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output ( output , expected_num_chunks ):
    """simple docstring"""
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def get_base_dtype ( arr_type ):
    """simple docstring"""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list ( lst , value ):
    """simple docstring"""
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
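# Hedged mini-check for the two helpers above: nested pyarrow list types
# unwrap to their primitive base type, and the in-place setter only touches
# the first primitive element of an arbitrarily nested list.
assert get_base_dtype(pa.list_(pa.list_(pa.int32()))) == pa.int32()
_lst = [[1, 2], [3]]
change_first_primitive_element_in_list(_lst, 99)
assert _lst == [[99, 2], [3]]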
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
a_ = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
a_ = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs ( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
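# Hedged mini-check for the helper above: get_pairs returns the set of
# adjacent symbol bigrams of a word, which is what drives BPE merge selection.
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}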
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =['input_ids', 'attention_mask']
def __init__( self : int , a : int , a : Any , a : List[Any]="replace" , a : Optional[Any]="<s>" , a : str="</s>" , a : Optional[Any]="</s>" , a : Optional[int]="<s>" , a : Optional[int]="<unk>" , a : Union[str, Any]="<pad>" , a : Tuple="<mask>" , a : Any=False , **a : Optional[int] , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
SCREAMING_SNAKE_CASE : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
SCREAMING_SNAKE_CASE : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
SCREAMING_SNAKE_CASE : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
SCREAMING_SNAKE_CASE : Tuple = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
SCREAMING_SNAKE_CASE : int = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE : Dict = json.load(a )
SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE : List[str] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE : Dict = bytes_to_unicode()
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE : str = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE : Dict = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE : List[str] = {}
SCREAMING_SNAKE_CASE : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE : Dict = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self : Tuple , a : Tuple ) -> List[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE : Any = tuple(a )
SCREAMING_SNAKE_CASE : str = get_pairs(a )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE : int = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = bigram
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while i < len(a ):
try:
SCREAMING_SNAKE_CASE : Optional[Any] = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE : int = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(a )
SCREAMING_SNAKE_CASE : int = new_word
if len(a ) == 1:
break
else:
SCREAMING_SNAKE_CASE : Any = get_pairs(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = " ".join(a )
SCREAMING_SNAKE_CASE : List[Any] = word
return word
def __UpperCamelCase ( self : Tuple , a : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = []
for token in re.findall(self.pat , a ):
SCREAMING_SNAKE_CASE : Optional[Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : List[Any] , a : str ) -> str:
"""simple docstring"""
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Optional[int] , a : Any ) -> Any:
"""simple docstring"""
return self.decoder.get(a )
def __UpperCamelCase ( self : List[str] , a : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = "".join(a )
SCREAMING_SNAKE_CASE : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def __UpperCamelCase ( self : str , a : str , a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : str = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE : int = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + "\n" )
SCREAMING_SNAKE_CASE : List[Any] = 0
with open(a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE : int = token_index
writer.write(" ".join(a ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
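    # Hedged illustration of the RoBERTa/LED pair layout built above: a single
    # sequence becomes <s> A </s>, a pair becomes <s> A </s></s> B </s>; e.g.
    # cls=[0], sep=[2], A=[10, 11], B=[20] -> [0, 10, 11, 2, 2, 20, 2].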
def __UpperCamelCase ( self : Tuple , a : List[int] , a : Optional[List[int]] = None , a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def __UpperCamelCase ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : List[Any] , a : Union[str, Any] , a : Any=False , **a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE : Union[str, Any] = " " + text
return (text, kwargs)
def __UpperCamelCase ( self : Union[str, Any] , a : Union[Dict[str, EncodedInput], BatchEncoding] , a : Optional[int] = None , a : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , a : Optional[int] = None , a : Optional[bool] = None , ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = super()._pad(
encoded_inputs=a , max_length=a , padding_strategy=a , pad_to_multiple_of=a , return_attention_mask=a , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : str = len(encoded_inputs["global_attention_mask"] ) != len(a )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : Tuple = len(a ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : Dict = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs | 25 |
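# --- Usage sketch (not part of the original snippet) ---
# The `_pad` override above pads `global_attention_mask` with `-1` because `0`
# already means "local attention" rather than "do not attend". A minimal
# standalone illustration of that rule (hypothetical helper, names assumed):
def pad_global_attention_mask(mask, target_length, padding_side="right"):
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]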
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 657 | 0 |
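# --- Usage sketch (not part of the original snippet) ---
# Rough re-statement of the alignment rule the test above exercises, assuming
# `out_features` are stage names and `out_indices` index into `stage_names`
# (illustrative only, not the transformers implementation):
def align_output_features_indices(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        # Defaults to the last stage when neither is given
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)

assert align_output_features_indices(None, None, ["a", "b", "c"]) == (["c"], [2])
assert align_output_features_indices(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])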
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
def run_func(_lowerCamelCase ):
@wraps(_lowerCamelCase )
def run_in_eager_mode(*_lowerCamelCase , **_lowerCamelCase ):
return func(*_lowerCamelCase , **_lowerCamelCase )
@wraps(_lowerCamelCase )
@tf.function(experimental_compile=_lowerCamelCase )
def run_in_graph_mode(*_lowerCamelCase , **_lowerCamelCase ):
return func(*_lowerCamelCase , **_lowerCamelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> ["tf.Tensor"]:
"""simple docstring"""
__snake_case : Dict = random.Random()
__snake_case : str = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _A ( __lowercase ):
lowercase__: TensorFlowBenchmarkArguments
lowercase__: PretrainedConfig
lowercase__: str = "TensorFlow"
@property
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return tf.__version__
def lowercase__ ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> float:
"""simple docstring"""
__snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__snake_case : Any = self._prepare_inference_func(__magic_name__ , __magic_name__ , __magic_name__ )
return self._measure_speed(_inference )
def lowercase__ ( self : Tuple , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> float:
"""simple docstring"""
__snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__snake_case : List[Any] = self._prepare_train_func(__magic_name__ , __magic_name__ , __magic_name__ )
return self._measure_speed(_train )
def lowercase__ ( self : int , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __magic_name__ )
__snake_case : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__snake_case : Optional[Any] = self._prepare_inference_func(__magic_name__ , __magic_name__ , __magic_name__ )
return self._measure_memory(_inference )
def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __magic_name__ )
__snake_case : Tuple = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__snake_case : List[str] = self._prepare_train_func(__magic_name__ , __magic_name__ , __magic_name__ )
return self._measure_memory(_train )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> Callable[[], None]:
"""simple docstring"""
__snake_case : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
__snake_case : str = (
hasattr(__magic_name__ , """architectures""" )
and isinstance(config.architectures , __magic_name__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__snake_case : List[Any] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
__snake_case : Any = __import__("""transformers""" , fromlist=[model_class] )
__snake_case : Union[str, Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Dict = model_cls(__magic_name__ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
__snake_case : int = TF_MODEL_MAPPING[config.__class__](__magic_name__ )
# encoder-decoder has vocab size saved differently
__snake_case : Optional[int] = config.vocab_size if hasattr(__magic_name__ , """vocab_size""" ) else config.encoder.vocab_size
__snake_case : str = random_input_ids(__magic_name__ , __magic_name__ , __magic_name__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__magic_name__ , decoder_input_ids=__magic_name__ , training=__magic_name__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__magic_name__ , training=__magic_name__ )
__snake_case : Any = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowercase__ ( self : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> Callable[[], None]:
"""simple docstring"""
__snake_case : Tuple = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
__snake_case : Optional[int] = (
hasattr(__magic_name__ , """architectures""" )
and isinstance(config.architectures , __magic_name__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__snake_case : Optional[int] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
__snake_case : Dict = __import__("""transformers""" , fromlist=[model_class] )
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Tuple = model_cls(__magic_name__ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
__snake_case : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__magic_name__ )
# encoder-decoder has vocab size saved differently
__snake_case : Optional[Any] = config.vocab_size if hasattr(__magic_name__ , """vocab_size""" ) else config.encoder.vocab_size
__snake_case : List[str] = random_input_ids(__magic_name__ , __magic_name__ , __magic_name__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__snake_case : Tuple = model(__magic_name__ , decoder_input_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ )[0]
__snake_case : Dict = tf.gradients(__magic_name__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__snake_case : Optional[Any] = model(__magic_name__ , labels=__magic_name__ , training=__magic_name__ )[0]
__snake_case : Optional[int] = tf.gradients(__magic_name__ , model.trainable_variables )
return gradients
__snake_case : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowercase__ ( self : str , __magic_name__ : Tuple ) -> float:
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(__magic_name__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__snake_case : Optional[int] = timeit.repeat(
__magic_name__ , repeat=self.args.repeat , number=10 , )
return min(__magic_name__ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def lowercase__ ( self : Tuple , __magic_name__ : Callable[[], None] ) -> [Memory, MemorySummary]:
"""simple docstring"""
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
__snake_case : Dict = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
__snake_case : int = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
__snake_case : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__snake_case : Union[str, Any] = nvml.nvmlDeviceGetMemoryInfo(__magic_name__ )
__snake_case : Union[str, Any] = meminfo.used
__snake_case : List[Any] = Memory(__magic_name__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
__snake_case : int = None
else:
__snake_case : Dict = measure_peak_memory_cpu(__magic_name__ )
__snake_case : Union[str, Any] = Memory(__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
__snake_case : Tuple = stop_memory_tracing(__magic_name__ )
if memory is None:
__snake_case : Any = summary.total
else:
__snake_case : Any = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 26 |
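# --- Usage sketch (not part of the original snippet) ---
# `_measure_speed` above follows the timeit documentation: report the *minimum*
# over repeats, since slower runs reflect system noise rather than the code.
# Self-contained illustration with the standard library only:
import timeit

def measure_min_runtime(fn, repeat=3, number=10):
    runtimes = timeit.repeat(fn, repeat=repeat, number=number)
    return min(runtimes) / number  # seconds per call in the best repeat

print(measure_min_runtime(lambda: sum(range(1000))))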
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> list:
"""simple docstring"""
def merge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(_SCREAMING_SNAKE_CASE ) <= 1:
return collection
_A = len(_SCREAMING_SNAKE_CASE ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Tuple = input("Enter numbers separated by a comma:\n").strip()
__A : Tuple = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 27 |
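# --- Usage sketch (not part of the original snippet) ---
# Expected behaviour of the merge sort above: merge_sort([0, 5, 3, 2, 2])
# returns [0, 2, 2, 3, 5] and merge_sort([]) returns []. The lazy `_merge`
# generator is equivalent to the standard library's stable k-way merge:
import heapq

def merge_sorted(left, right):
    return list(heapq.merge(left, right))  # stable, O(len(left) + len(right))

assert merge_sorted([0, 2, 5], [2, 3]) == [0, 2, 2, 3, 5]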
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
UpperCamelCase_ = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self, A, A, A = None, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : str = os.path.abspath(os.path.join('examples', 'by_feature' ) )
SCREAMING_SNAKE_CASE : str = os.path.abspath('examples' )
for item in os.listdir(A ):
if item not in EXCLUDE_EXAMPLES:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(A, A )
if os.path.isfile(A ) and ".py" in item_path:
with self.subTest(
tested_script=A, feature_script=A, tested_section='main()' if parser_only else 'training_function()', ):
SCREAMING_SNAKE_CASE : str = compare_against_test(
os.path.join(A, A ), A, A, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = '\n'.join(A )
if special_strings is not None:
for string in special_strings:
SCREAMING_SNAKE_CASE : int = diff.replace(A, '' )
self.assertEqual(A, '' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.one_complete_example('complete_nlp_example.py', A )
self.one_complete_example('complete_nlp_example.py', A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = os.path.abspath(os.path.join('examples', 'cv_example.py' ) )
SCREAMING_SNAKE_CASE : Dict = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py', A, A, A )
self.one_complete_example('complete_cv_example.py', A, A, A )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = False
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
super().setUpClass()
SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(cls._tmpdir, 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE : Any = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0' ) ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
SCREAMING_SNAKE_CASE : Tuple = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2' ) ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0' )}\n ".split()
SCREAMING_SNAKE_CASE : List[str] = run_command(self._launch_args + testargs, return_stdout=A )
self.assertNotIn('epoch 0:', A )
self.assertIn('epoch 1:', A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2' )}\n ".split()
SCREAMING_SNAKE_CASE : Optional[Any] = run_command(self._launch_args + testargs, return_stdout=A )
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE : Any = 1
if num_processes > 1:
self.assertNotIn('epoch 0:', A )
self.assertIn('epoch 1:', A )
else:
self.assertIn('epoch 0:', A )
self.assertIn('epoch 1:', A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'} ):
SCREAMING_SNAKE_CASE : Union[str, Any] = run_command(self._launch_args + testargs, return_stdout=A )
SCREAMING_SNAKE_CASE : Optional[int] = re.findall('({.+})', A )
SCREAMING_SNAKE_CASE : Dict = [r for r in results if 'accuracy' in r][-1]
SCREAMING_SNAKE_CASE : Optional[int] = ast.literal_eval(A )
self.assertGreaterEqual(results['accuracy'], 0.75 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'} )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
SCREAMING_SNAKE_CASE : Optional[Any] = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(A, 'tracking' ) ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 28 |
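# --- Usage sketch (not part of the original snippet) ---
# `one_complete_example` above diffs a `by_feature` script against the complete
# example and asserts the remaining diff is empty after removing known-variable
# lines. A toy version of that idea using only the standard library:
import difflib

def script_diff(tested_lines, reference_lines, ignore=()):
    diff = [
        line
        for line in difflib.unified_diff(reference_lines, tested_lines, lineterm="")
        if line.startswith(("+", "-")) and not line.startswith(("+++", "---"))
    ]
    return [line for line in diff if line[1:] not in ignore]

assert script_diff(["a", "b"], ["a", "b"]) == []
assert script_diff(["a", "c"], ["a", "b"], ignore=("b", "c")) == []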
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
    if not isinstance(number , int ):
        _UpperCAmelCase = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
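# --- Usage sketch (not part of the original snippet) ---
# The function above tests for *automorphic* numbers: n whose square ends in
# the digits of n (5**2 = 25, 76**2 = 5776, 25**2 = 625). De-obfuscated
# equivalent of the digit-by-digit comparison:
def is_automorphic(number: int) -> bool:
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert [n for n in range(100) if is_automorphic(n)] == [0, 1, 5, 6, 25, 76]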
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 |
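# --- Usage sketch (not part of the original snippet) ---
# Both `__init__.py` rows in this file follow the same pattern: declare the
# import structure up front, expose real names only under TYPE_CHECKING, and
# defer everything else to a lazy module. A stripped-down sketch of that idea
# with PEP 562 module-level __getattr__ (illustrative, not `_LazyModule`;
# assumes it lives inside a package's __init__.py):
import importlib

_IMPORT_STRUCTURE = {"tokenization_roformer": ["RoFormerTokenizer"]}
_NAME_TO_MODULE = {name: mod for mod, names in _IMPORT_STRUCTURE.items() for name in names}

def __getattr__(name):
    if name in _NAME_TO_MODULE:
        module = importlib.import_module("." + _NAME_TO_MODULE[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")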
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 657 | 0 |
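# --- Usage sketch (not part of the original snippet) ---
# The `unnormalize` closure above maps LayoutLM-style boxes (coordinates on a
# 0-1000 grid) back to pixel space. The same arithmetic as a standalone helper
# (hypothetical name, mirrors the pipeline code):
def unnormalize_box(bbox, width, height):
    return [
        width * bbox[0] / 1000,
        height * bbox[1] / 1000,
        width * bbox[2] / 1000,
        height * bbox[3] / 1000,
    ]

assert unnormalize_box([0, 0, 500, 1000], width=200, height=100) == [0.0, 0.0, 100.0, 100.0]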
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a = logging.get_logger(__name__)
__a = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''imagegpt'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : Union[str, Any] = n_embd
UpperCAmelCase_ : Any = n_layer
UpperCAmelCase_ : Optional[Any] = n_head
UpperCAmelCase_ : Union[str, Any] = n_inner
UpperCAmelCase_ : List[Any] = activation_function
UpperCAmelCase_ : List[str] = resid_pdrop
UpperCAmelCase_ : str = embd_pdrop
UpperCAmelCase_ : Optional[Any] = attn_pdrop
UpperCAmelCase_ : Dict = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Dict = scale_attn_weights
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ : Tuple = reorder_and_upcast_attn
UpperCAmelCase_ : int = tie_word_embeddings
super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
class __a( _a ):
"""simple docstring"""
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) )
return inputs | 30 |
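# --- Usage sketch (not part of the original snippet) ---
# The `attribute_map` above lets the config answer generic names such as
# `hidden_size` by forwarding to GPT-2 style fields such as `n_embd`. Minimal
# sketch of that aliasing mechanism (illustrative, not PretrainedConfig):
class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails
        if name != "attribute_map" and name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

config = AliasedConfig(n_embd=512, n_layer=24)
assert config.hidden_size == 512 and config.num_hidden_layers == 24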
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] ) -> Dict:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = is_small_dataset(__UpperCAmelCase )
assert result == expected | 31 |
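# --- Usage sketch (not part of the original snippet) ---
# The parametrised test above pins down the rule: a dataset counts as "small"
# only when both a size and a positive in-memory maximum are known and
# size < maximum. Equivalent standalone predicate:
def is_small(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

assert is_small(400 * 2**20, 900 * 2**20) is True
assert is_small(600 * 2**20, 0) is False
assert is_small(None, 100 * 2**20) is False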
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase_ = logging.getLogger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
super().__init__(
_UpperCamelCase , question_encoder_tokenizer=_UpperCamelCase , generator_tokenizer=_UpperCamelCase , index=_UpperCamelCase , init_retrieval=_UpperCamelCase , )
_UpperCAmelCase = None
def UpperCamelCase( self , _UpperCamelCase ):
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
_UpperCAmelCase = self._infer_socket_ifname()
# avoid clash with the NCCL port
_UpperCAmelCase = str(distributed_port + 1 )
_UpperCAmelCase = dist.new_group(ranks=_UpperCamelCase , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase( self ):
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=torch.floataa ):
_UpperCAmelCase = torch.empty(_UpperCamelCase , dtype=_UpperCamelCase )
dist.scatter(_UpperCamelCase , src=0 , scatter_list=_UpperCamelCase , group=self.process_group )
return target_tensor
def UpperCamelCase( self ):
_UpperCAmelCase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_UpperCAmelCase = next((addr for addr in addrs if addr.startswith('''e''' )) , _UpperCamelCase )
return ifname
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
# single GPU training
if not dist.is_initialized():
_UpperCAmelCase , _UpperCAmelCase = self._main_retrieve(_UpperCamelCase , _UpperCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_UpperCamelCase )
# distributed training
_UpperCAmelCase = dist.get_world_size(group=self.process_group )
# gather logic
_UpperCAmelCase = None
if self._is_main():
_UpperCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_UpperCamelCase )]
dist.gather(torch.tensor(_UpperCamelCase ) , dst=0 , gather_list=_UpperCamelCase , group=self.process_group )
# scatter logic
_UpperCAmelCase = question_hidden_states.shape[0]
_UpperCAmelCase = []
_UpperCAmelCase = []
if self._is_main():
assert len(_UpperCamelCase ) == world_size
_UpperCAmelCase , _UpperCAmelCase = self._main_retrieve(torch.cat(_UpperCamelCase ).numpy() , _UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = torch.tensor(_UpperCamelCase ), torch.tensor(_UpperCamelCase )
_UpperCAmelCase = self._chunk_tensor(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = self._chunk_tensor(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = self._scattered(_UpperCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_UpperCAmelCase = self._scattered(_UpperCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_UpperCamelCase ) | 32 |
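# --- Usage sketch (not part of the original snippet) ---
# The retriever above gathers all queries on rank 0, retrieves once, then
# scatters the results back with `dist.scatter`. The chunking step is just an
# even split; a plain-Python sketch of a `_chunk_tensor`-style helper
# (hypothetical, list-based rather than tensor-based):
def chunk_evenly(items, num_chunks):
    chunk_size = (len(items) + num_chunks - 1) // num_chunks  # ceil division
    return [items[i : i + chunk_size] for i in range(0, len(items), chunk_size)]

assert chunk_evenly([1, 2, 3, 4, 5, 6], 3) == [[1, 2], [3, 4], [5, 6]]
assert chunk_evenly([1, 2, 3, 4, 5], 2) == [[1, 2, 3], [4, 5]]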
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
            _UpperCAmelCase = os.path.join(tmpdirname , "schedule.bin" )
            torch.save(scheduler.state_dict() , file_name )
            _UpperCAmelCase = torch.load(file_name )
            scheduler.load_state_dict(state_dict )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 | 0 |
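The expected learning-rate lists above come straight from the warmup/decay lambdas. A minimal sketch (assuming `torch` and `transformers` are installed) reproduces the linear-warmup row:

import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(50, 50)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    lrs.append(optimizer.param_groups[0]["lr"])  # same value the test reads via scheduler.get_lr()[0]
    optimizer.step()
    scheduler.step()
print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]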
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
lowerCamelCase__ : Tuple = """bart"""
lowerCamelCase__ : Dict = True
@st.cache(allow_output_mutation=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
if LOAD_DENSE_INDEX:
snake_case__ = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
snake_case__ = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
snake_case__ = qar_model.eval()
else:
snake_case__ , snake_case__ = (None, None)
if MODEL_TYPE == "bart":
snake_case__ = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
snake_case__ = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
snake_case__ = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
snake_case__ = sas_model.eval()
else:
snake_case__ , snake_case__ = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> int:
if LOAD_DENSE_INDEX:
snake_case__ = faiss.StandardGpuResources()
snake_case__ = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
snake_case__ = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
snake_case__ = faiss.IndexFlatIP(128 )
snake_case__ = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase )
wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU
else:
snake_case__ , snake_case__ = (None, None)
snake_case__ = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
snake_case__ = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
snake_case__ = elia['''train_eli5''']
snake_case__ = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
snake_case__ = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__lowerCAmelCase )
return (elia_train, eli5_train_q_index)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = load_indexes()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = load_models()
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = load_train_data()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=10 ) -> List[Any]:
snake_case__ = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase )
snake_case__ , snake_case__ = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = [elia_train[int(__lowerCAmelCase )] for i in I[0]]
return nn_examples
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase="wiki40b" , __lowerCAmelCase="dense" , __lowerCAmelCase=10 ) -> int:
if source == "none":
snake_case__ , snake_case__ = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
snake_case__ , snake_case__ = query_qa_dense_index(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
snake_case__ , snake_case__ = query_es_index(
__lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , )
snake_case__ = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
snake_case__ = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None),
} )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=64 , __lowerCAmelCase=256 , __lowerCAmelCase=False , __lowerCAmelCase=2 , __lowerCAmelCase=0.95 , __lowerCAmelCase=0.8 ) -> Dict:
with torch.no_grad():
snake_case__ = qa_sas_generate(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
lowerCamelCase__ : int = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
lowerCamelCase__ : List[Any] = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowerCamelCase__ : Any = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
lowerCamelCase__ : str = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
lowerCamelCase__ : Tuple = st.sidebar.checkbox("""Demo options""")
if demo_options:
lowerCamelCase__ : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
lowerCamelCase__ : Union[str, Any] = action_list.index(action_st)
lowerCamelCase__ : List[Any] = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
lowerCamelCase__ : int = show_type == """Show full text of passages"""
else:
lowerCamelCase__ : str = 3
lowerCamelCase__ : Any = True
lowerCamelCase__ : Tuple = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
lowerCamelCase__ : Optional[int] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
"""
st.sidebar.markdown(retriever_info)
lowerCamelCase__ : Optional[int] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
lowerCamelCase__ : int = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
lowerCamelCase__ : str = """wiki40b"""
lowerCamelCase__ : str = """dense"""
lowerCamelCase__ : int = """beam"""
lowerCamelCase__ : Optional[int] = 2
lowerCamelCase__ : int = 6_4
lowerCamelCase__ : List[Any] = 2_5_6
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : str = st.sidebar.checkbox("""Generation options""")
if generate_options:
lowerCamelCase__ : int = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
lowerCamelCase__ : List[str] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
lowerCamelCase__ : Optional[Any] = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
lowerCamelCase__ : Optional[int] = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
lowerCamelCase__ : Optional[int] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowerCamelCase__ : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
lowerCamelCase__ : Union[str, Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
lowerCamelCase__ : Dict = None
# start main text
lowerCamelCase__ : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
lowerCamelCase__ : Optional[Any] = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
lowerCamelCase__ : Dict = st.text_input("""Enter your question here:""", """""")
else:
lowerCamelCase__ : Union[str, Any] = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
lowerCamelCase__ , lowerCamelCase__ : List[str] = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
lowerCamelCase__ , lowerCamelCase__ : Any = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
lowerCamelCase__ : Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowerCamelCase__ : Union[str, Any] = support_list[:1_0]
lowerCamelCase__ : Optional[Any] = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
lowerCamelCase__ , lowerCamelCase__ : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
lowerCamelCase__ : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
lowerCamelCase__ : List[Any] = res[1].strip()
if sec_titles == "":
lowerCamelCase__ : int = """[{}]({})""".format(res[0], wiki_url)
else:
lowerCamelCase__ : str = sec_titles.split(""" & """)
lowerCamelCase__ : int = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
lowerCamelCase__ : List[Any] = find_nearest_training(question)
lowerCamelCase__ : int = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
lowerCamelCase__ : Optional[int] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
lowerCamelCase__ : Union[str, Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 33 |
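The demo's dense retriever boils down to exact max-inner-product search over passage embeddings. A self-contained sketch with random stand-in data (the real app memmaps precomputed wiki40b reps; the shapes here are only illustrative):

import faiss
import numpy as np

d = 128  # embedding dimension used by the memmapped reps above
rng = np.random.default_rng(0)
passage_reps = rng.standard_normal((1000, d)).astype("float32")
question_rep = rng.standard_normal((1, d)).astype("float32")

index = faiss.IndexFlatIP(d)  # exact inner-product index, as in the loader above
index.add(passage_reps)
scores, ids = index.search(question_rep, 10)  # top-10 passage ids for the question
print(ids[0], scores[0])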
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 657 | 0 |
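For reference, the launch step can be approximated with the standard library. This is a hypothetical stand-in for `execute_subprocess_async`, not the real helper; it assumes `accelerate` is installed and the script path is illustrative:

import os
import subprocess

cmd = ["accelerate-launch", "test_script.py"]  # path is illustrative
result = subprocess.run(cmd, env=os.environ.copy())
if result.returncode == 0:
    print("Test is a success! You are ready for your distributed training!")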
"""simple docstring"""
import random
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
if index >= len(_lowercase ) or index < 0:
return None
UpperCamelCase = items[random.randint(0 ,len(_lowercase ) - 1 )]
UpperCamelCase = 0
UpperCamelCase , UpperCamelCase , UpperCamelCase = _partition(_lowercase ,_lowercase )
UpperCamelCase = len(_lowercase )
UpperCamelCase = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase ,_lowercase )
# must be in larger
else:
return quick_select(_lowercase ,index - (m + count) ) | 34 |
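With the mangled names restored, the routine above is the classic randomized quickselect; a runnable restatement plus a usage check:

import random

def quick_select(items, index):
    # kth smallest element (0-indexed) via random-pivot partitioning
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller = [x for x in items if x < pivot]
    equal = [x for x in items if x == pivot]
    if len(smaller) <= index < len(smaller) + len(equal):
        return pivot
    if index < len(smaller):
        return quick_select(smaller, index)
    return quick_select([x for x in items if x > pivot], index - len(smaller) - len(equal))

data = [2, 7, 4, 1, 9, 3]
print(quick_select(data, len(data) // 2))  # 4 -> element at sorted index 3 of [1, 2, 3, 4, 7, 9]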
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 | 0 |
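A quick sanity check on the bracket used in the demo calls: the endpoints must straddle a sign change, and for 10 - x**2 the positive root is sqrt(10):

import math

def equation(x):
    return 10 - x * x

assert equation(-2) * equation(5) < 0  # opposite signs, so bisection(-2, 5) is a valid bracket
print(math.sqrt(10))  # ~3.1623; bisection(-2, 5) converges to within 0.01 of this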
from math import factorial
def a ( A__ = 2_0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
SCREAMING_SNAKE_CASE__ : Dict = n // 2
return int(factorial(A__ ) / (factorial(A__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 |
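The function above computes the central binomial coefficient C(2n, n), i.e. the number of monotone lattice paths through an n x n grid (Project Euler 15); the stdlib confirms the default answer:

import math

n = 20
assert math.comb(2 * n, n) == 137846528820  # matches solution(20)
print(math.comb(2 * n, n))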
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPT2Config(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPT2LMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.int64 , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 | 0 |
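Shape-wise, the forward pass above maps CLIP-style features to `prefix_length` pseudo-token embeddings and concatenates them in front of the text embeddings before the GPT-2 call; a dummy-tensor walk-through (illustrative sizes only):

import torch

batch, prefix_length, n_embd, seq_len = 2, 10, 768, 5
prefix_embeds = torch.randn(batch, prefix_length, n_embd)  # output of decode_prefix
embedding_text = torch.randn(batch, seq_len, n_embd)       # output of wte(input_ids)
hidden = torch.cat((prefix_embeds, embedding_text), dim=1)
print(hidden.shape)  # torch.Size([2, 15, 768]) -> what the language model consumes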
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=7 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=99 ,SCREAMING_SNAKE_CASE_=36 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=37 ,SCREAMING_SNAKE_CASE_="gelu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=16 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=6 ,SCREAMING_SNAKE_CASE_=6 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=1000 ,):
'''simple docstring'''
snake_case : int = parent
snake_case : Optional[Any] = batch_size
snake_case : List[Any] = num_channels
snake_case : Optional[Any] = image_size
snake_case : List[Any] = patch_size
snake_case : List[str] = is_training
snake_case : Union[str, Any] = use_input_mask
snake_case : Tuple = use_token_type_ids
snake_case : str = use_labels
snake_case : int = vocab_size
snake_case : Optional[Any] = hidden_size
snake_case : int = num_hidden_layers
snake_case : Dict = num_attention_heads
snake_case : Any = intermediate_size
snake_case : int = hidden_act
snake_case : Any = hidden_dropout_prob
snake_case : int = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : List[str] = type_vocab_size
snake_case : List[Any] = type_sequence_label_size
snake_case : Tuple = initializer_range
snake_case : Optional[Any] = coordinate_size
snake_case : Optional[Any] = shape_size
snake_case : Dict = num_labels
snake_case : Tuple = num_choices
snake_case : str = scope
snake_case : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case : Dict = text_seq_length
snake_case : Optional[Any] = (image_size // patch_size) ** 2 + 1
snake_case : Union[str, Any] = self.text_seq_length + self.image_seq_length
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
snake_case : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
snake_case : Optional[int] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case : Dict = bbox[i, j, 3]
snake_case : Dict = bbox[i, j, 1]
snake_case : Optional[int] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case : Dict = bbox[i, j, 2]
snake_case : Optional[Any] = bbox[i, j, 0]
snake_case : Optional[int] = tmp_coordinate
snake_case : Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : List[str] = None
if self.use_input_mask:
snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case : Optional[int] = None
if self.use_token_type_ids:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
snake_case : Tuple = None
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
snake_case : Any = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : str = TFLayoutLMvaModel(config=SCREAMING_SNAKE_CASE_ )
# text + image
snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model(
SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,token_type_ids=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ ,)
snake_case : int = model(SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case : str = model(SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case : Optional[int] = model({"""pixel_values""": pixel_values} ,training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = self.num_labels
snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
snake_case : int = model(
SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,token_type_ids=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = self.num_labels
snake_case : Optional[int] = TFLayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = model(
SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,token_type_ids=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Optional[int] = 2
snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = model(
SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,token_type_ids=SCREAMING_SNAKE_CASE_ ,start_positions=SCREAMING_SNAKE_CASE_ ,end_positions=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = self.prepare_config_and_inputs()
((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : Dict = config_and_inputs
snake_case : Union[str, Any] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class _A ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
__lowerCamelCase : List[str] = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : List[Any] = False
__lowerCamelCase : Any = False
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return True
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
snake_case : str = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : Any = {
k: tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ ,1 ) ,(1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(SCREAMING_SNAKE_CASE_ ,tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : Optional[int] = tf.ones(self.model_tester.batch_size ,dtype=tf.int32 )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : Dict = tf.zeros(self.model_tester.batch_size ,dtype=tf.int32 )
snake_case : Tuple = tf.zeros(self.model_tester.batch_size ,dtype=tf.int32 )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : List[Any] = tf.zeros(self.model_tester.batch_size ,dtype=tf.int32 )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
snake_case : Dict = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=tf.int32 )
return inputs_dict
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = TFLayoutLMvaModelTester(self )
snake_case : str = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE_ ,hidden_size=37 )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
if getattr(SCREAMING_SNAKE_CASE_ ,"""hf_compute_loss""" ,SCREAMING_SNAKE_CASE_ ):
# The number of elements in the loss should be the same as the number of elements in the label
snake_case : Optional[Any] = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() ,reverse=SCREAMING_SNAKE_CASE_ )[0]
]
snake_case : List[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
snake_case : Tuple = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = prepared_for_class.pop("""input_ids""" )
snake_case : Dict = model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : Any = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
snake_case : Union[str, Any] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
snake_case : Dict = -100
snake_case : Optional[Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
snake_case : int = model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
snake_case : str = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : Dict = model(SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
snake_case : Any = self._prepare_for_class(inputs_dict.copy() ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
# Get keys that were added with the _prepare_for_class function
snake_case : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
snake_case : Optional[int] = inspect.signature(model.call ).parameters
snake_case : Tuple = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
snake_case : Optional[Any] = {0: """input_ids"""}
for label_key in label_keys:
snake_case : Dict = signature_names.index(SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = label_key
snake_case : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
snake_case : Dict = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
snake_case : Optional[Any] = prepared_for_class[value]
snake_case : Optional[int] = tuple(SCREAMING_SNAKE_CASE_ )
# Send to model
snake_case : Union[str, Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def snake_case_ ( self ):
'''simple docstring'''
( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : int = type
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : str = TFLayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowercase ( ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class _A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ ) if is_vision_available() else None
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Any = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
snake_case : int = self.default_image_processor
snake_case : Optional[Any] = prepare_img()
snake_case : str = image_processor(images=SCREAMING_SNAKE_CASE_ ,return_tensors="""tf""" ).pixel_values
snake_case : Optional[int] = tf.constant([[1, 2]] )
snake_case : List[str] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) ,axis=0 )
# forward pass
snake_case : List[str] = model(input_ids=SCREAMING_SNAKE_CASE_ ,bbox=SCREAMING_SNAKE_CASE_ ,pixel_values=SCREAMING_SNAKE_CASE_ ,training=SCREAMING_SNAKE_CASE_ )
# verify the logits
snake_case : List[Any] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape ,SCREAMING_SNAKE_CASE_ )
snake_case : str = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ) )
| 36 |
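The integration test's sequence length of 199 follows from the model's multimodal input layout; a dummy-input sketch (assuming the base checkpoint's defaults of 224-pixel images and 16-pixel patches):

import tensorflow as tf

input_ids = tf.constant([[1, 2]])                                    # (batch, text_seq_len)
bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), 0)  # (batch, text_seq_len, 4)
pixel_values = tf.random.uniform((1, 3, 224, 224))                   # (batch, channels, H, W)
# model sequence length = 2 text tokens + (224 // 16) ** 2 patches + 1 CLS = 199
print(input_ids.shape, bbox.shape, pixel_values.shape)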
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
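The pattern above defers heavy imports until an attribute is first accessed. A stdlib-only stand-in (not the real `_LazyModule`, just its core idea):

import importlib

class LazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self._package)
        return getattr(module, attr)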
UpperCamelCase : int = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 37 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 0 |
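A short usage sketch (assuming `transformers` is installed): the dynamic ONNX axes above mean one exported graph serves any batch size and sequence length, and the config exposes ConvBERT's span-convolution knobs:

from transformers import ConvBertConfig

config = ConvBertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.conv_kernel_size, config.head_ratio)  # 9 2 -> the defaults set above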
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : str = tempfile.mkdtemp()
snake_case__ : str = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
snake_case__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
snake_case__ : List[Any] = {
"""do_resize""": True,
"""size""": {"""height""": 2_2_4, """width""": 2_2_4},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 1_8, """width""": 1_8},
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"""do_convert_rgb""": True,
}
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return BertTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Dict = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
snake_case__ : Optional[Any] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Optional[Any] = self.get_rust_tokenizer()
snake_case__ : Dict = self.get_image_processor()
snake_case__ : int = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
snake_case__ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
snake_case__ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Any = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Any = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
snake_case__ : int = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=__SCREAMING_SNAKE_CASE )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.get_image_processor()
snake_case__ : Tuple = self.get_tokenizer()
snake_case__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = self.prepare_image_inputs()
snake_case__ : List[str] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
snake_case__ : Optional[Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_image_processor()
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Tuple = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """Alexandra,T-shirt的价格是15便士。"""
snake_case__ : List[Any] = processor(text=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Tuple = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Alexandra,T-shirt的价格是15便士。"""
snake_case__ : Union[str, Any] = self.prepare_image_inputs()
snake_case__ : Tuple = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Optional[int] = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : Union[str, Any] = processor.batch_decode(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : str = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Alexandra,T-shirt的价格是15便士。"""
snake_case__ : List[str] = self.prepare_image_inputs()
snake_case__ : Tuple = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 38 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
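# Cross-check sketch for the base16 helpers above. Note the obfuscated snippet binds
# both functions to the same name, so the decoder shadows the encoder; the standard
# library's codec shows the intended round-trip.
import base64

payload = b"Hello"
encoded = base64.b16encode(payload).decode()  # '48656C6C6F'
assert base64.b16decode(encoded) == payload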
| 657 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = "biogpt"
def __init__( self : Optional[Any] , _UpperCamelCase : List[str]=4_2_3_8_4 , _UpperCamelCase : Tuple=1_0_2_4 , _UpperCamelCase : Dict=2_4 , _UpperCamelCase : List[Any]=1_6 , _UpperCamelCase : str=4_0_9_6 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Dict=1_0_2_4 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : str=0.0 , _UpperCamelCase : str=1 , _UpperCamelCase : List[str]=0 , _UpperCamelCase : int=2 , **_UpperCamelCase : Tuple , ) ->List[Any]:
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = scale_embedding
snake_case_ = use_cache
snake_case_ = layerdrop
snake_case_ = activation_dropout
        super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
| 39 |
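# Usage sketch for the config class above (assuming the upstream BioGptConfig that
# this obfuscated row mirrors): the defaults reproduce microsoft/biogpt.
from transformers import BioGptConfig

config = BioGptConfig()
print(config.model_type, config.vocab_size)  # biogpt 42384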
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
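# The try/except above is the standard optional-dependency guard: probe the backends,
# and fall back to dummy classes that raise a helpful ImportError on use. A simplified
# standalone sketch of the same pattern (class name reused purely for illustration):
try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if not _torch_available:
    class KandinskyPipeline:  # placeholder mirroring the dummy-object fallback
        def __init__(self, *args, **kwargs):
            raise ImportError("KandinskyPipeline requires torch and transformers.")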
| 40 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
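# Condensed sketch of the write path above: create the card directory, then emit
# README.md (paths illustrative, not the repo's actual layout).
from pathlib import Path

card_dir = Path("model_cards") / "allenai" / "wmt16-en-de-12-1"
card_dir.mkdir(parents=True, exist_ok=True)
(card_dir / "README.md").write_text("---\nlicense: apache-2.0\n---\n", encoding="utf-8")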
| 657 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
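# _LazyModule defers the heavy torch/TF imports until attribute access. A toy sketch
# of the mechanism (greatly simplified; the real class lives in transformers.utils
# and also handles __dir__, reduction for pickling, etc.):
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when the attribute is first requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")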
| 41 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
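# Sketch of the requires_backends contract the dummy class above relies on (a
# simplified stand-in; the real helper in diffusers.utils also checks versions):
import importlib.util

def requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")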
| 657 | 0 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def _UpperCamelCase ( ) -> tuple[list[int], int]:
lowerCamelCase_ = [randint(-10_00 ,10_00 ) for i in range(10 )]
lowerCamelCase_ = randint(-50_00 ,50_00 )
return (arr, r)
A_ = make_dataset()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> tuple[int, ...]:
for triplet in permutations(__UpperCamelCase ,3 ):
if sum(__UpperCamelCase ) == target:
return tuple(sorted(__UpperCamelCase ) )
return (0, 0, 0)
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> tuple[int, int, int]:
arr.sort()
lowerCamelCase_ = len(__UpperCamelCase )
for i in range(n - 1 ):
lowerCamelCase_ ,lowerCamelCase_ = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def _UpperCamelCase ( ) -> tuple[float, float]:
lowerCamelCase_ = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
lowerCamelCase_ = '\ntriplet_sum1(*dataset)\n'
lowerCamelCase_ = '\ntriplet_sum2(*dataset)\n'
lowerCamelCase_ = repeat(setup=__UpperCamelCase ,stmt=__UpperCamelCase ,repeat=5 ,number=1_00_00 )
lowerCamelCase_ = repeat(setup=__UpperCamelCase ,stmt=__UpperCamelCase ,repeat=5 ,number=1_00_00 )
return (min(__UpperCamelCase ), min(__UpperCamelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
A_ = solution_times()
print(f'''The time for naive implementation is {times[0]}.''')
print(f'''The time for optimized implementation is {times[1]}.''')
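# Standalone restatement of the two-pointer scan benchmarked above (needed because
# the obfuscated row reuses one name for every function): fix arr[i], then walk the
# left/right pointers inward over the sorted remainder.
def triplet_sum_sketch(arr, target):
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s == target:
                return (arr[i], arr[left], arr[right])
            if s < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)

assert triplet_sum_sketch([2, 7, 4, 0, 9, 5, 1, 3], 6) == (0, 1, 5)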
| 42 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
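# Usage sketch for the config above (assuming the upstream UperNetConfig this row
# mirrors): leaving backbone_config unset falls back to a default ResNet backbone.
from transformers import UperNetConfig

config = UperNetConfig()
print(config.backbone_config.model_type)  # resnet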
| 657 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase__ ( A ):
def __init__( self : Dict,__A : AutoencoderKL,__A : CLIPTextModel,__A : CLIPTokenizer,__A : UNetaDConditionModel,__A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],__A : StableDiffusionSafetyChecker,__A : CLIPImageProcessor,):
super().__init__()
self.register_modules(
vae=__A,text_encoder=__A,tokenizer=__A,unet=__A,scheduler=__A,safety_checker=__A,feature_extractor=__A,)
def lowerCamelCase_ ( self : str,__A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCamelCase : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__A )
def lowerCamelCase_ ( self : Tuple ):
self.enable_attention_slicing(__A )
@torch.no_grad()
def __call__( self : List[Any],__A : Union[str, List[str]],__A : int = 5_1_2,__A : int = 5_1_2,__A : int = 5_0,__A : float = 7.5,__A : Optional[Union[str, List[str]]] = None,__A : Optional[int] = 1,__A : float = 0.0,__A : Optional[torch.Generator] = None,__A : Optional[torch.FloatTensor] = None,__A : Optional[str] = "pil",__A : bool = True,__A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None,__A : int = 1,__A : Optional[torch.FloatTensor] = None,**__A : str,):
if isinstance(__A,__A ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,__A ):
_lowerCamelCase : Optional[int] = len(__A )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__A )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A,__A ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__A )}.' )
# get prompt text embeddings
_lowerCamelCase : Optional[Any] = self.tokenizer(
__A,padding="max_length",max_length=self.tokenizer.model_max_length,return_tensors="pt",)
_lowerCamelCase : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCamelCase : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_lowerCamelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_lowerCamelCase : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = text_embeddings.shape
_lowerCamelCase : Optional[Any] = text_embeddings.repeat(1,__A,1 )
_lowerCamelCase : Any = text_embeddings.view(bs_embed * num_images_per_prompt,__A,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCamelCase : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCamelCase : List[str]
if negative_prompt is None:
_lowerCamelCase : Any = [""]
elif type(__A ) is not type(__A ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(__A )} !='
f' {type(__A )}.' )
elif isinstance(__A,__A ):
_lowerCamelCase : Optional[Any] = [negative_prompt]
elif batch_size != len(__A ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(__A )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
_lowerCamelCase : List[Any] = negative_prompt
_lowerCamelCase : Any = text_input_ids.shape[-1]
_lowerCamelCase : Optional[Any] = self.tokenizer(
__A,padding="max_length",max_length=__A,truncation=__A,return_tensors="pt",)
_lowerCamelCase : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCamelCase : Optional[Any] = uncond_embeddings.shape[1]
_lowerCamelCase : Union[str, Any] = uncond_embeddings.repeat(__A,__A,1 )
_lowerCamelCase : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt,__A,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCamelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCamelCase : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
_lowerCamelCase : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCamelCase : List[Any] = torch.randn(
__A,generator=__A,device="cpu",dtype=__A ).to(self.device )
_lowerCamelCase : List[Any] = torch.randn(__A,generator=__A,device="cpu",dtype=__A ).to(
self.device )
else:
_lowerCamelCase : Any = torch.randn(
__A,generator=__A,device=self.device,dtype=__A )
_lowerCamelCase : Any = torch.randn(__A,generator=__A,device=self.device,dtype=__A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
_lowerCamelCase : Tuple = latents_reference.to(self.device )
_lowerCamelCase : Union[str, Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_lowerCamelCase : str = (latents_shape[3] - latents_shape_reference[3]) // 2
_lowerCamelCase : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
_lowerCamelCase : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_lowerCamelCase : Optional[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_lowerCamelCase : int = 0 if dx < 0 else dx
_lowerCamelCase : Dict = 0 if dy < 0 else dy
_lowerCamelCase : Optional[Any] = max(-dx,0 )
_lowerCamelCase : List[Any] = max(-dy,0 )
_lowerCamelCase : List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCamelCase : List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCamelCase : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCamelCase : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCamelCase : int = {}
if accepts_eta:
_lowerCamelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Dict = self.scheduler.scale_model_input(__A,__A )
# predict the noise residual
_lowerCamelCase : Any = self.unet(__A,__A,encoder_hidden_states=__A ).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Dict = self.scheduler.step(__A,__A,__A,**__A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A,__A,__A )
_lowerCamelCase : Any = 1 / 0.18215 * latents
_lowerCamelCase : List[str] = self.vae.decode(__A ).sample
_lowerCamelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCamelCase : List[Any] = image.cpu().permute(0,2,3,1 ).float().numpy()
if self.safety_checker is not None:
_lowerCamelCase : List[Any] = self.feature_extractor(self.numpy_to_pil(__A ),return_tensors="pt" ).to(
self.device )
_lowerCamelCase , _lowerCamelCase : str = self.safety_checker(
images=__A,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_lowerCamelCase : Union[str, Any] = None
if output_type == "pil":
_lowerCamelCase : Any = self.numpy_to_pil(__A )
if not return_dict:
return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=__A,nsfw_content_detected=__A )
| 44 |
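# The classifier-free guidance step at the core of the denoising loop above,
# isolated as a standalone tensor sketch (shapes illustrative):
import torch

noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] branches stacked
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)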
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
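# Arithmetic behind the (1, 199, 768) shape asserted above: 2 text tokens plus
# (224 // 16) ** 2 image patches plus 1 CLS token (values assume the
# layoutlmv3-base defaults of image_size=224, patch_size=16).
image_size, patch_size, text_tokens = 224, 16, 2
assert text_tokens + (image_size // patch_size) ** 2 + 1 == 199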
| 657 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowercase )
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_snake_case : ClassVar[Features] = Features({"""image""": Image()} )
_snake_case : ClassVar[Features] = Features({"""labels""": ClassLabel} )
_snake_case : str = "image"
_snake_case : str = "labels"
def __a ( self :Any , lowerCamelCase__ :Dict ):
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , lowerCamelCase__ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
UpperCamelCase__ :Union[str, Any] = copy.deepcopy(self )
UpperCamelCase__ :Any = self.label_schema.copy()
UpperCamelCase__ :int = features[self.label_column]
UpperCamelCase__ :Optional[Any] = label_schema
return task_template
@property
def __a ( self :List[Any] ):
return {
self.image_column: "image",
self.label_column: "labels",
        }
| 45 |
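# Usage sketch for the task template above (assuming the upstream
# datasets.tasks.ImageClassification this row mirrors): aligning with features
# copies the dataset's real ClassLabel into the label schema.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification().align_with_features(features)
print(task.label_schema["labels"].names)  # ['cat', 'dog']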
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
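# End-to-end sketch of the API these tests exercise (downloads the tiny test
# checkpoint named in the tests; scores are illustrative, not pinned):
from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for prediction in unmasker("My name is <mask>"):
    print(prediction["token_str"], round(prediction["score"], 6))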
| 657 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_lowerCAmelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_lowerCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
_lowerCAmelCase : set[int] = {ord(char) for char in VALID_CHARS}
_lowerCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str | None:
'''simple docstring'''
_lowerCamelCase : str = ""
_lowerCamelCase : int
_lowerCamelCase : int
_lowerCamelCase : int
for keychar, cipherchar in zip(cycle(_lowerCamelCase ) , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_lowerCamelCase )
return decoded
def lowerCamelCase_( _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : list[str] = []
for key in product(_lowerCamelCase , repeat=3 ):
_lowerCamelCase : int = try_key(_lowerCamelCase , _lowerCamelCase )
if encoded is not None:
possibles.append(_lowerCamelCase )
return possibles
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCamelCase_( _lowerCamelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
_lowerCamelCase : list[int]
_lowerCamelCase : list[str]
_lowerCamelCase : str
_lowerCamelCase : str
_lowerCamelCase : str = Path(_lowerCamelCase ).parent.joinpath(_lowerCamelCase ).read_text(encoding="utf-8" )
_lowerCamelCase : Optional[int] = [int(_lowerCamelCase ) for number in data.strip().split("," )]
_lowerCamelCase : List[Any] = filter_valid_chars(_lowerCamelCase )
for common_word in COMMON_WORDS:
_lowerCamelCase : Union[str, Any] = filter_common_word(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) == 1:
break
_lowerCamelCase : List[str] = possibles[0]
return sum(ord(_lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 46 |
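# Round-trip sketch of the XOR scheme the solver above inverts: encrypt with a
# cycled three-letter lowercase key, decrypt by XORing with the same key.
from itertools import cycle

key = b"god"
plain = b"the quick brown fox"
cipher = bytes(p ^ k for p, k in zip(plain, cycle(key)))
assert bytes(c ^ k for c, k in zip(cipher, cycle(key))) == plain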
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        # an explicit pyarrow type must go through `type=Value(...)`, not `pa.array(type=...)`
        with self.assertRaises(ValueError):
            pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        # try_type falls back to the inferred type instead of raising
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
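# Each internal writer batch becomes one record batch in the Arrow IPC stream, which is
# why the tests below expect `num_examples` chunks when writer_batch_size == 1 and a
# single chunk otherwise.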
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    # unwrap nested list types down to the primitive element type
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
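# OptimizedTypedSequence downcasts well-known token columns (masks to int8, input_ids
# to int32) and falls back to int64 once a value no longer fits the target type, which
# is exactly what the out-of-range branch above exercises.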
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 657 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
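# Sanity check (my own note): entropy(torch.tensor([0.5, 0.5])) == -2 * 0.5 * log(0.5)
# ≈ 0.6931 nats; the `plogp[p == 0] = 0` line keeps 0 * log(0) from producing NaN.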
def print_2d_tensor(tensor):
    """Print a 2D tensor as a tab-separated table, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores, following
    http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
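# The importance score above follows Michel et al. (2019), http://arxiv.org/abs/1905.10650:
# with a multiplicative mask xi_h on every attention head, importance is estimated as
# I_h = E_x |dL(x)/d xi_h|, accumulated here through head_mask.grad over the eval set.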
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) based on the head importance scores
    until the score drops below the masking threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and measure the effect
    on score and timing."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written."
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3"
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1"
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)."
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
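# Example invocation (illustrative script name and paths):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir ./tokens.txt \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9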
if __name__ == "__main__":
main()
| 47 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 657 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Optional[Any] = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
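# Note: replacing sys.modules[__name__] with a _LazyModule defers the heavy
# torch-dependent import until an attribute such as TimmBackbone is first accessed.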
| 48 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 657 | 0 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `b` can be formed from `a` by uppercasing some of its
    lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
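# DP state, for reference: dp[i][j] is True iff a[:i] can be turned into b[:j], where each
# lowercase char of `a` is either uppercased (consumes one char of `b`) or deleted.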
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,  # the long literal kept verbatim above
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 657 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 50 |
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
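# e.g. is_automorphic_number(5) and is_automorphic_number(76) are True (25 ends in 5,
# 76 * 76 = 5776 ends in 76), while is_automorphic_number(7) is False (49).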
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
'''simple docstring'''
def topological_sort(graph) -> None:
    """Kahn's algorithm: repeatedly emit vertices whose in-degree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
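# For the adjacency list above this prints [0, 1, 2, 3, 4, 5]: vertex 0 is the only
# zero in-degree start, and removing each vertex frees its successors in order.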
| 51 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # LayoutLM boxes are normalized to a 0-1000 grid
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an (xmin, ymin, xmax, ymax) tensor into a dict of int coordinates."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
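# Minimal usage sketch (illustrative, not part of this file):
#   from transformers import pipeline
#   detector = pipeline("object-detection")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]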
| 657 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 52 |
"""simple docstring"""
def merge_sort(collection):
    """Sort a list in ascending order via top-down merge sort."""

    def merge(left, right) -> list:
        # merge two already-sorted lists into one sorted list
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
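# Top-down merge sort performs O(n log n) comparisons with O(n) auxiliary space from the
# slices; note that list.pop(0) is O(n) per call, so this generator-based merge favors
# brevity over raw speed.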
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE .pt checkpoint to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path where the converted diffusers VAE is saved.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
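# Usage sketch (the script file name is an assumption — invoke however this file is saved):
#
#     python convert_vae_pt_to_diffusers.py \
#         --vae_pt_path /path/to/vae.ckpt \
#         --dump_path ./converted_vae
#
# The dump path can then be reloaded with AutoencoderKL.from_pretrained("./converted_vae").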
| 53 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 657 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
UpperCAmelCase_ =get_resize_output_image_size(_lowerCAmelCase , size=size["shortest_edge"] , default_to_square=_lowerCAmelCase )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Optional[int] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: PILImageResampling = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: int = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowerCAmelCase: Tuple , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , param_name="size" , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ =crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , param_name="crop_size" , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ =[convert_to_rgb(_lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
UpperCAmelCase_ =[self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
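# Usage sketch for the processor above (values illustrative; requires Pillow and numpy):
#
#     import numpy as np
#     from PIL import Image
#
#     processor = CLIPImageProcessor()
#     image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
#     batch = processor(images=image, return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop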
| 54 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
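# The state_dict round-trip exercised above, shown in isolation (paths and the
# scheduler choice are illustrative):
#
#     scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#     torch.save(scheduler.state_dict(), "schedule.bin")
#     scheduler.load_state_dict(torch.load("schedule.bin"))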
| 657 | 0 |
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
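    # Sanity checks: 13195 = 5 * 7 * 13 * 29, and a prime is its own largest factor.
    assert solution(13195) == 29
    assert solution(17) == 17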
| 55 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
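# Typical invocations once installed (this module backs the `accelerate test` subcommand):
#
#     accelerate test
#     accelerate test --config_file path/to/default_config.yaml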
| 657 | 0 |
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class Test(unittest.TestCase):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
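# Why the loop above can step by 6: every prime greater than 3 is congruent to
# 1 or 5 mod 6, so only candidates i and i + 2 for i in 5, 11, 17, ... need
# testing. For number = 29, int(math.sqrt(29)) + 1 == 6, so the loop tries only
# i = 5 (checking divisors 5 and 7); neither divides 29, hence 29 is prime.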
| 56 |
"""simple docstring"""
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # A sign change over [a, b] is required for a root to lie inside the bracket
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
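    # A quick bound on the work: each pass halves [a, b], so reaching the 0.01
    # tolerance from a bracket of width w takes about ceil(log2(w / 0.01)) passes.
    import math

    print(math.ceil(math.log2((5 - -2) / 0.01)))  # 10 iterations suffice for bisection(-2, 5)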
| 657 | 0 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
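
# A quick check against the known example from the Project Euler 43 statement:
# 1406357289 is substring-divisible, so the predicate above should accept it.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))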
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 57 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
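# Beams above are ranked by average log-probability (scores_sum / seq_lengths),
# which avoids the bias toward short sequences that raw summed log-probs have;
# a toy illustration of the normalisation:
#
#     import torch
#     scores_sum = torch.tensor([-4.0, -9.0])   # summed log-probs of two beams
#     seq_lengths = torch.tensor([2.0, 6.0])
#     scores_sum / seq_lengths                  # tensor([-2.0, -1.5]) -> longer beam ranks higher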
| 657 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=1_8 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=None , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = size if size is not None else {"""shortest_edge""": 2_0}
snake_case_ : Optional[int] = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Optional[Any] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Any = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Dict = min_resolution
snake_case_ : int = max_resolution
snake_case_ : List[Any] = do_resize
snake_case_ : Dict = size
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : List[str] = crop_size
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
self.assertTrue(hasattr(_lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowercase , """crop_size""" ) )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 2_0} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
snake_case_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : List[Any] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : List[str] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 58 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_neox_fast'''] = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox'''] = [
        '''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTNeoXForCausalLM''',
        '''GPTNeoXForQuestionAnswering''',
        '''GPTNeoXForSequenceClassification''',
        '''GPTNeoXForTokenClassification''',
        '''GPTNeoXLayer''',
        '''GPTNeoXModel''',
        '''GPTNeoXPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
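# With the lazy module installed in sys.modules, submodule attributes resolve on
# first access rather than at import time, e.g. (illustrative):
#
#     from transformers.models.gpt_neox import GPTNeoXConfig  # triggers the real import lazily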
| 657 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1E-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
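# Instantiation sketch using the defaults restored above:
#
#     config = SegformerConfig()
#     onnx_config = SegformerOnnxConfig(config)
#     list(onnx_config.inputs)  # ['pixel_values'], with batch/channel/height/width dynamic axes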
| 59 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = '''convbert'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
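# The exported axes depend on the task; a short sketch:
#
#     onnx_config = ConvBertOnnxConfig(ConvBertConfig(), task="multiple-choice")
#     onnx_config.inputs["input_ids"]  # {0: 'batch', 1: 'choice', 2: 'sequence'}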
| 657 | 0 |
import random
def _partition(data: list, pivot) -> tuple:
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """simple docstring"""
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
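
# Usage sketch for the selection routine above (expected O(n) average time):
if __name__ == "__main__":
    items = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(items, 5))  # 54 — the sixth-smallest element
    print(quick_select(items, 0))  # 2 — the minimum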
| 60 |
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    """simple docstring"""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """simple docstring"""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
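    # Round-trip sketch: decode after encode is the identity on bytes.
    assert base16_decode(base16_encode(b"Hello")) == b"Hello"
    print(base16_encode(b"Hello"))  # 48656C6C6F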
| 657 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    """simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 13 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = 128 , SCREAMING_SNAKE_CASE__ : int=[16, 32, 64, 128] , SCREAMING_SNAKE_CASE__ : int = 7 , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 37 , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 128 , SCREAMING_SNAKE_CASE__ : List[int] = [2, 2, 2, 2] , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , ) -> Tuple:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = encoder_stride
lowerCAmelCase__ = num_attention_outputs
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = embed_dim + 1
lowerCAmelCase__ = resolution
lowerCAmelCase__ = depths
lowerCAmelCase__ = hidden_sizes
lowerCAmelCase__ = dim
lowerCAmelCase__ = mlp_expansion_ratio
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def a ( self : int ) -> List[str]:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
lowerCAmelCase__ = TFEfficientFormerModel(config=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self : Dict ) -> Optional[Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
def a ( self : Any ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def a ( self : str ) -> Dict:
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def a ( self : Optional[Any] ) -> List[str]:
pass
def a ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def a ( self : List[str] ) -> Union[str, Any]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , training=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowerCAmelCase__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCAmelCase__ = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCAmelCase__ = outputs.decoder_hidden_states
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "seq_length" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "decoder_seq_length" , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> Tuple:
lowerCAmelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def a ( self : List[Any] ) -> List[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def a ( self : List[str] ) -> Union[str, Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def a ( self : str ) -> Any:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : Optional[Any] ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFEfficientFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
lowerCAmelCase__ = getattr(self.model_tester , "seq_length" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "encoder_seq_length" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "key_length" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "chunk_length" , SCREAMING_SNAKE_CASE__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowerCAmelCase__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , training=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , training=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def a ( self : Union[str, Any] ) -> Any:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(outputs_dict is not None )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self : Dict ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def a ( self : str ) -> Optional[Any]:
lowerCAmelCase__ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="tf" )
# forward pass
lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
@slow
def a ( self : Tuple ) -> Union[str, Any]:
lowerCAmelCase__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="tf" )
# forward pass
lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
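# A minimal usage sketch of the integration path the slow tests above verify,
# assuming the TF and vision extras are installed; the checkpoint name and the
# (1, 1000) logits shape are taken from the tests themselves.
image_processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
image = _A()  # the COCO fixture loader defined above
inputs = image_processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits  # shape (1, 1000)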
| 61 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(numerator ), int(denominator )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 0 |
import math
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = 2
SCREAMING_SNAKE_CASE : List[str] = int(math.sqrt(lowercase ) ) # Size of every segment
SCREAMING_SNAKE_CASE : str = [True] * (end + 1)
SCREAMING_SNAKE_CASE : Dict = []
while start <= end:
if temp[start] is True:
in_prime.append(start )
for i in range(start * start , end + 1 , start ):
SCREAMING_SNAKE_CASE : Optional[Any] = False
start += 1
prime += in_prime
SCREAMING_SNAKE_CASE : Union[str, Any] = end + 1
SCREAMING_SNAKE_CASE : Optional[Any] = min(2 * end , lowercase )
while low <= lowercase :
SCREAMING_SNAKE_CASE : Dict = [True] * (high - low + 1)
for each in in_prime:
SCREAMING_SNAKE_CASE : Tuple = math.floor(low / each ) * each
if t < low:
t += each
for j in range(t , high + 1 , each ):
SCREAMING_SNAKE_CASE : Union[str, Any] = False
for j in range(len(lowercase ) ):
if temp[j] is True:
prime.append(j + low )
SCREAMING_SNAKE_CASE : str = high + 1
SCREAMING_SNAKE_CASE : List[Any] = min(high + end , lowercase )
return prime
print(sieve(10**6))
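# A readable reference sketch of the segmented sieve above (the snippet's
# placeholder assignments hide targets such as temp[i] = False): sieve the
# primes up to sqrt(n) first, then mark composites one segment at a time.
import math

def segmented_sieve(n):
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []  # primes up to sqrt(n)
    for start in range(2, end + 1):
        if temp[start]:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
    prime = list(in_prime)
    low, high = end + 1, min(2 * end, n)
    while low <= n:
        seg = [True] * (high - low + 1)
        for each in in_prime:
            t = (low // each) * each  # first multiple of `each` at or past low
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                seg[j - low] = False
        prime += [j + low for j in range(len(seg)) if seg[j]]
        low, high = high + 1, min(high + end, n)
    return prime

assert segmented_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]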
| 62 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
a : List[Any] = TypeVar("T")
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self : str , __lowercase : T ) -> None:
__UpperCAmelCase : Optional[int] = data
__UpperCAmelCase : Optional[Any] = self
__UpperCAmelCase : Optional[int] = 0
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self : Any ) -> None:
# map from node name to the node object
__UpperCAmelCase : dict[T, DisjointSetTreeNode[T]] = {}
def UpperCAmelCase ( self : List[str] , __lowercase : T ) -> None:
# create a new set with x as its member
__UpperCAmelCase : Optional[int] = DisjointSetTreeNode(__lowercase )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : T ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
__UpperCAmelCase : Any = self.map[data]
if elem_ref != elem_ref.parent:
__UpperCAmelCase : List[Any] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCAmelCase ( self : Tuple , __lowercase : DisjointSetTreeNode[T] , __lowercase : DisjointSetTreeNode[T] ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
__UpperCAmelCase : Dict = nodea
else:
__UpperCAmelCase : List[Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCAmelCase ( self : int , __lowercase : T , __lowercase : T ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(__lowercase ) , self.find_set(__lowercase ) )
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self : Any ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
__UpperCAmelCase : dict[T, dict[T, int]] = {}
def UpperCAmelCase ( self : Dict , __lowercase : T ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
__UpperCAmelCase : List[Any] = {}
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : T , __lowercase : T , __lowercase : int ) -> None:
# add an edge with the given weight
self.add_node(__lowercase )
self.add_node(__lowercase )
__UpperCAmelCase : Any = weight
__UpperCAmelCase : Any = weight
def UpperCAmelCase ( self : Tuple ) -> GraphUndirectedWeighted[T]:
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : int = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda __lowercase : __lowercase[2] )
# creating the disjoint set
__UpperCAmelCase : str = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__lowercase )
# MST generation
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : str = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = edges[index]
index += 1
__UpperCAmelCase : str = disjoint_set.find_set(__lowercase )
__UpperCAmelCase : Union[str, Any] = disjoint_set.find_set(__lowercase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__lowercase , __lowercase , __lowercase )
disjoint_set.union(__lowercase , __lowercase )
return graph
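# A compact, runnable sketch of the same idea (the three classes above all
# had their names obfuscated to `a`): union-find with path compression plus
# edges sorted by weight is exactly Kruskal's minimum-spanning-tree algorithm.
def kruskal(n, edges):
    parent = list(range(n))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge connects two different components
            parent[root_u] = root_v
            mst.append((u, v, w))
    return mst

assert kruskal(3, [(0, 1, 1), (1, 2, 2), (0, 2, 10)]) == [(0, 1, 1), (1, 2, 2)]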
| 63 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
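# Sketch of how the dummy-object pattern above behaves at runtime, assuming
# the usual `requires_backends` semantics: when the optional backends
# ("torch", "torchsde") are missing, constructing the placeholder raises an
# ImportError that tells the user which packages to install.
try:
    placeholder = _lowerCAmelCase()  # the dummy class defined above
except ImportError as err:
    print(err)  # e.g. asks to pip install torch and torchsde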
| 657 | 0 |
import os
def A__ ( ):
SCREAMING_SNAKE_CASE__: List[str]= os.path.dirname(os.path.realpath(__file__ ) )
SCREAMING_SNAKE_CASE__: Optional[int]= os.path.join(snake_case_ , '''triangle.txt''' )
with open(snake_case_ ) as f:
SCREAMING_SNAKE_CASE__: str= f.readlines()
SCREAMING_SNAKE_CASE__: List[Any]= []
for line in triangle:
SCREAMING_SNAKE_CASE__: Any= []
for number in line.strip().split(''' ''' ):
numbers_from_line.append(int(number ) )
a.append(numbers_from_line )
for i in range(1 , len(a ) ):
for j in range(len(a[i] ) ):
SCREAMING_SNAKE_CASE__: Any= a[i - 1][j] if j != len(a[i - 1] ) else 0
SCREAMING_SNAKE_CASE__: Dict= a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(snake_case_ , snake_case_ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
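# The nested loop above is the classic maximum-path-sum DP over a triangle
# read from triangle.txt (Project Euler style): each cell accumulates the
# best path ending there via a[i][j] += max(a[i-1][j-1], a[i-1][j]).
# A self-contained sketch of the same recurrence on an inline triangle:
def max_path_sum(triangle):
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            up = a[i - 1][j] if j != len(a[i - 1]) else 0
            diagonal = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(up, diagonal)
    return max(a[-1])

assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23  # 3+7+4+9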
| 64 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
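# Usage sketch, assuming transformers is importable: with backbone_config=None
# the __init__ above logs a notice and falls back to a default ResNet backbone
# exposing stage1..stage4 (class name kept as obfuscated above).
config = _lowerCAmelCase()
print(config.backbone_config.model_type)  # "resnet"
print(config.hidden_size)  # 512, the default from the signature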
| 657 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = False, False, False
@dataclass
class __lowercase :
snake_case_ = None
snake_case_ = True
snake_case_ = True
snake_case_ = None
# Automatically constructed
snake_case_ = "dict"
snake_case_ = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
snake_case_ = field(default="""Audio""" , init=__lowerCamelCase , repr=__lowerCamelCase )
def __call__( self : Tuple ):
'''simple docstring'''
return self.pa_type
def __lowercase ( self : List[str] ,A : Union[str, bytes, dict] ):
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(A ,A ):
return {"bytes": None, "path": value}
elif isinstance(A ,A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase__ : int = BytesIO()
sf.write(A ,value["""array"""] ,value["""sampling_rate"""] ,format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase__ : Optional[int] = np.frombuffer(value["""bytes"""] ,dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
UpperCAmelCase__ : str = np.memmap(value["""path"""] ,dtype="""h""" ,mode="""r""" ).astype(np.floataa ) / 32_767
UpperCAmelCase__ : List[Any] = BytesIO(bytes() )
sf.write(A ,A ,value["""sampling_rate"""] ,format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def __lowercase ( self : Optional[Any] ,A : dict ,A : Optional[Dict[str, Union[str, bool, None]]] = None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
UpperCAmelCase__ : Optional[int] = xsplitext(A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
UpperCAmelCase__ : Union[str, Any] = token_per_repo_id or {}
UpperCAmelCase__ : Union[str, Any] = path.split("""::""" )[-1]
try:
UpperCAmelCase__ : List[Any] = string_to_dict(A ,config.HUB_DATASETS_URL )["""repo_id"""]
UpperCAmelCase__ : Optional[Any] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase__ : str = None
with xopen(A ,"""rb""" ,use_auth_token=A ) as f:
UpperCAmelCase__ , UpperCAmelCase__ : Any = sf.read(A )
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = sf.read(A )
UpperCAmelCase__ : List[Any] = array.T
if self.mono:
UpperCAmelCase__ : List[str] = librosa.to_mono(A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase__ : Tuple = librosa.resample(A ,orig_sr=A ,target_sr=self.sampling_rate )
UpperCAmelCase__ : str = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowercase ( self : Tuple ):
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def __lowercase ( self : Optional[int] ,A : Union[pa.StringArray, pa.StructArray] ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
UpperCAmelCase__ : str = pa.array([None] * len(A ) ,type=pa.binary() )
UpperCAmelCase__ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase__ : List[Any] = pa.array([None] * len(A ) ,type=pa.string() )
UpperCAmelCase__ : int = pa.StructArray.from_arrays([storage, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
UpperCAmelCase__ : Dict = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
UpperCAmelCase__ : List[Any] = storage.field("""bytes""" )
else:
UpperCAmelCase__ : Tuple = pa.array([None] * len(A ) ,type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
UpperCAmelCase__ : List[str] = storage.field("""path""" )
else:
UpperCAmelCase__ : Union[str, Any] = pa.array([None] * len(A ) ,type=pa.string() )
UpperCAmelCase__ : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() )
return array_cast(A ,self.pa_type )
def __lowercase ( self : List[str] ,A : pa.StructArray ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(A : Tuple ):
with xopen(A ,"""rb""" ) as f:
UpperCAmelCase__ : Any = f.read()
return bytes_
UpperCAmelCase__ : Dict = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase__ : str = pa.array(
[os.path.basename(A ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase__ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() )
return array_cast(A ,self.pa_type )
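# Sketch of the encode path above, assuming soundfile is installed: a dict
# carrying a raw float array plus its sampling rate is serialised to
# in-memory WAV bytes and `path` stays None. Shown with the un-obfuscated
# `datasets.Audio` feature that this snippet mirrors.
import numpy as np
from datasets import Audio

example = {"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000}
encoded = Audio().encode_example(example)
assert encoded["path"] is None and encoded["bytes"].startswith(b"RIFF")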
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
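# A pure-stdlib approximation of what _LazyModule provides, assuming the
# standard PEP 562 pattern inside a package's __init__.py: the submodule is
# imported only when the attribute is first accessed, keeping import cheap.
import importlib

def __getattr__(name):
    if name == "TapexTokenizer":
        module = importlib.import_module(".tokenization_tapex", __name__)
        return module.TapexTokenizer
    raise AttributeError(name)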
| 657 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = StableDiffusionInstructPixaPixPipeline
_UpperCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
_UpperCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCamelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
_lowercase : Optional[int] = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
torch.manual_seed(0 )
_lowercase : List[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_lowercase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowercase : Any = CLIPTextModel(_lowerCAmelCase )
_lowercase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowercase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
_lowercase : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowercase : Dict = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert('RGB' )
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : List[Any] = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : int = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __a ( self ):
_lowercase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : List[str] = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : List[Any] = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : Any = sd_pipe(**_lowerCAmelCase ).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowercase : List[str] = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : int = self.get_dummy_components()
_lowercase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : List[Any] = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Tuple = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : Optional[Any] = 'french fries'
_lowercase : Dict = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
_lowercase : Optional[Any] = output.images
_lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowercase : Any = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components()
_lowercase : int = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : Any = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Any = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : Union[str, Any] = [inputs['prompt']] * 2
_lowercase : Union[str, Any] = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
_lowercase : Tuple = torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase )
_lowercase : Optional[int] = image / 2 + 0.5
_lowercase : List[Any] = image.permute(0 , 3 , 1 , 2 )
_lowercase : Optional[int] = image.repeat(2 , 1 , 1 , 1 )
_lowercase : Any = sd_pipe(**_lowerCAmelCase ).images
_lowercase : List[str] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
_lowercase : Optional[int] = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components()
_lowercase : Any = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
_lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : List[str] = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Dict = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : List[str] = sd_pipe(**_lowerCAmelCase ).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
_lowercase : Optional[int] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
_lowercase : str = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ):
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : Dict = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : List[str] = VaeImageProcessor(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_lowercase : Dict = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : str = pipe(**self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type='pt' ) )[0]
_lowercase : List[str] = components['vae']
_lowercase : Optional[Any] = self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_lowercase : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
_lowercase : Optional[Any] = pipe(**_lowerCAmelCase )[0]
_lowercase : List[str] = np.abs(out - out_latents_inputs ).max()
self.assertLess(_lowerCAmelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , _lowerCAmelCase=0 ):
_lowercase : Tuple = torch.manual_seed(_lowerCAmelCase )
_lowercase : List[str] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
_lowercase : Optional[int] = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __a ( self ):
_lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Tuple = self.get_inputs()
_lowercase : Dict = pipe(**_lowerCAmelCase ).images
_lowercase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : Optional[Any] = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ):
_lowercase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase )
_lowercase : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Optional[int] = self.get_inputs()
_lowercase : Optional[int] = pipe(**_lowerCAmelCase ).images
_lowercase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : List[Any] = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase )
_lowercase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Tuple = self.get_inputs()
_lowercase : int = pipe(**_lowerCAmelCase ).images
_lowercase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : str = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Dict = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
_lowercase : Any = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowercase : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_lowercase : Dict = latents[0, -3:, -3:, -1]
_lowercase : Any = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_lowercase : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_lowercase : List[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_lowercase : Tuple = False
_lowercase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
_lowercase : str = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Any = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowercase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
_lowercase : Dict = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowercase : List[Any] = self.get_inputs()
_lowercase : List[Any] = pipe(**_lowerCAmelCase )
_lowercase : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def __a ( self ):
_lowercase : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : Union[str, Any] = inputs['image'].resize((5_0_4, 5_0_4) )
_lowercase : List[str] = 'timbrooks/instruct-pix2pix'
_lowercase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Any = pipe(**_lowerCAmelCase )
_lowercase : List[str] = output.images[0]
_lowercase : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
_lowercase : Tuple = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
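# Minimal usage sketch of the pipeline exercised above, with the checkpoint
# and image URL taken from the slow tests; it needs a CUDA GPU with a few GB
# of free memory.
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", safety_checker=None
).to("cuda")
edited = pipe("turn him into a cyborg", image=init_image, image_guidance_scale=1.0).images[0]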
| 66 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
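# Minimal sketch of the forward pass the integration test above verifies
# (checkpoint, inputs and the (1, 199, 768) output shape come from the test;
# the image loader is the fixture helper defined above, obfuscated to
# __lowerCamelCase).
processor = LayoutLMvaImageProcessor(apply_ocr=False)
model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
image = __lowerCamelCase()  # the COCO fixture loader defined above
pixel_values = processor(images=image, return_tensors="pt").pixel_values
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 199, 768])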
| 657 | 0 |
from __future__ import annotations
from typing import Any
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[Any] ) -> None:
create_state_space_tree(snake_case__ , [] , 0 )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[Any] , snake_case__ :list[Any] , snake_case__ :int ) -> None:
if index == len(snake_case__ ):
print(snake_case__ )
return
create_state_space_tree(snake_case__ , snake_case__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(snake_case__ , snake_case__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
snake_case = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 67 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
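# Outside the test harness, the same pipeline can be driven directly; a minimal
# sketch assuming `transformers` and one backend are installed (model name and
# prompts are illustrative):
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
print(unmasker("The largest city in France is <mask>."))
# `targets` restricts scoring to the given tokens; `top_k` caps the result count.
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2))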
| 657 | 0 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """simple docstring"""
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    """simple docstring"""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    """simple docstring"""
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    """simple docstring"""
@require_cuda
def _a ( self : List[Any] ) -> Any:
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase =GradientState()
assert state.num_steps == 1
__UpperCAmelCase =4
assert state.num_steps == 4
assert state.sync_gradients is True
__UpperCAmelCase =False
assert state.sync_gradients is False
GradientState._reset_state()
def _a ( self : List[str] ) -> Tuple:
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _a ( self : Any ) -> str:
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _a ( self : str ) -> Tuple:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("""torch.cuda.set_device""", noop), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64"""):
__UpperCAmelCase =Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def _a ( self : Tuple ) -> str:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =get_signature(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# make sure random weights don't match
load_random_weights(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 )
def _a ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =get_signature(__SCREAMING_SNAKE_CASE )
# saving hook
def save_config(__SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ):
__UpperCAmelCase ={"""class_name""": models[0].__class__.__name__}
with open(os.path.join(__SCREAMING_SNAKE_CASE , """data.json""" ) , """w""" ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# loading hook
def load_config(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ):
with open(os.path.join(__SCREAMING_SNAKE_CASE , """data.json""" ) , """r""" ) as f:
__UpperCAmelCase =json.load(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =config["""class_name"""]
__UpperCAmelCase =accelerator.register_save_state_pre_hook(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =accelerator.register_load_state_pre_hook(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# make sure random weights don't match with hooks
load_random_weights(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 )
# random class name to verify correct one is loaded
__UpperCAmelCase ="""random"""
# make sure loaded weights match with hooks
accelerator.load_state(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# make sure random weights don't match with hooks removed
load_random_weights(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 )
# random class name to verify correct one is loaded
__UpperCAmelCase ="""random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _a ( self : str ) -> List[Any]:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
__UpperCAmelCase =None
# This should work
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertTrue(dummy_obj is None )
def _a ( self : Dict ) -> Any:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
__UpperCAmelCase =[1, 2, 3]
# This should work
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def _a ( self : Union[str, Any] ) -> str:
from transformers import AutoModelForCausalLM
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map={"""""": 0} , )
__UpperCAmelCase =Accelerator()
# This should work
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
@slow
@require_bnb
def _a ( self : str ) -> str:
from transformers import AutoModelForCausalLM
__UpperCAmelCase =Accelerator()
with init_empty_weights():
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
__UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""cpu"""
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=__SCREAMING_SNAKE_CASE , load_in_abit=__SCREAMING_SNAKE_CASE , llm_inta_enable_fpaa_cpu_offload=__SCREAMING_SNAKE_CASE )
# This should not work and get value error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
@slow
@require_bnb
@require_multi_gpu
def _a ( self : str ) -> Any:
from transformers import AutoModelForCausalLM
__UpperCAmelCase ={"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
__UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =1
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =Accelerator()
# This should not work and get value error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _a ( self : Optional[Any] ) -> str:
from transformers import AutoModelForCausalLM
with init_empty_weights():
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
__UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =1
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =Accelerator()
# This should work
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
@require_cuda
def _a ( self : str ) -> List[Any]:
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
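# A minimal end-to-end sketch of the prepare()/backward() flow these tests
# exercise, assuming `accelerate` and `torch` are installed; it reuses the
# `create_components` helper defined above (illustrative only):
import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(*create_components())
x = torch.randn(8, 2).to(accelerator.device)
loss = model(x).sum()
accelerator.backward(loss)  # replaces loss.backward() under Accelerate
optimizer.step()
optimizer.zero_grad()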
| 68 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output(output, expected_num_chunks: int):
    """simple docstring"""
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
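# A minimal end-to-end sketch of the writer these tests exercise, assuming
# `datasets` and `pyarrow` are installed; it mirrors the assertions above:
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()
table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}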
| 657 | 0 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a trainable low-rank adapter (used by the LoRA test below)."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
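# Quick illustrative check of the adapter above (not part of the original
# tests): wrapping a small linear layer preserves the output shape, and since
# adapter[1] is zero-initialized, the wrapped module's output is unchanged.
_base = nn.Linear(8, 8)
_adapted = LoRALayer(_base, rank=2)  # rank chosen arbitrarily for the sketch
_x = torch.randn(1, 8)
assert torch.allclose(_adapted(_x), _base(_x))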
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
__SCREAMING_SNAKE_CASE = """bigscience/bloom-1b7"""
# Constant values
__SCREAMING_SNAKE_CASE = 2.109_6595_5269_2574
__SCREAMING_SNAKE_CASE = """Hello my name is"""
__SCREAMING_SNAKE_CASE = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
__SCREAMING_SNAKE_CASE = 10
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
def A ( self : List[str] ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.model_abit.config
self.assertTrue(hasattr(a_ , "quantization_config" ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def A ( self : Optional[Any] ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def A ( self : int ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(a_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
def A ( self : int ):
"""simple docstring"""
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a_ , device_map="auto" )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(a_ )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = BitsAndBytesConfig()
with self.assertRaises(a_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a_ , load_in_abit=a_ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def A ( self : int ):
"""simple docstring"""
with self.assertRaises(a_ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(a_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(a_ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(a_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(a_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to("cpu" )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=a_ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@classmethod
def A ( cls : Optional[int] ):
"""simple docstring"""
__snake_case = "t5-small"
__snake_case = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = "Translate in German: Hello, my dog is cute"
def A ( self : List[str] ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[str] ):
"""simple docstring"""
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case = model.generate(**a_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a_ , device_map="auto" )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case = model.generate(**a_ )
__snake_case = modules
def A ( self : Optional[Any] ):
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case = model.generate(**a_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a_ , device_map="auto" )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case = model.generate(**a_ )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# model_name
__snake_case = "bigscience/bloom-560m"
__snake_case = "t5-small"
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=a_ , device_map="auto" )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=a_ , device_map="auto" )
def A ( self : Dict ):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A ( self : int ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : str ):
"""simple docstring"""
super().setUp()
def A ( self : str ):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A ( self : Tuple ):
"""simple docstring"""
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Any ):
"""simple docstring"""
super().setUp()
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=a_ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : int ):
"""simple docstring"""
__snake_case = "facebook/opt-350m"
super().setUp()
def A ( self : Dict ):
"""simple docstring"""
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(a_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**a_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(a_ , a_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(a_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """gpt2-xl"""
__SCREAMING_SNAKE_CASE = 3.3191_8548_5415_2187
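# For reference, a minimal sketch of loading a model in 4-bit NF4 outside the
# test harness, assuming recent `transformers`, `accelerate`, and
# `bitsandbytes` plus a CUDA GPU; the model name is illustrative:
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
model_4bit = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=quant_config, device_map="auto"
)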
| 69 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])
        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
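# Quick illustration of the alignment helper's contract (argument order:
# out_features, out_indices, stage_names), assuming transformers is installed:
features, indices = get_aligned_output_features_output_indices(None, None, ["stem", "stage1", "stage2"])
assert features == ["stage2"] and indices == [2]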
| 657 | 0 |
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 70 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
    '''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
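# Illustrative sketch of the lazy-import idea used above (simplified; the real
# transformers._LazyModule also handles optional-dependency errors and direct
# submodule access):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Import the submodule only when one of its attributes is first requested.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)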
| 657 | 0 |
'''simple docstring'''
def bfs(graph, source, sink, parent):
    """Breadth-first search for an augmenting path; records predecessors in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant of Ford-Fulkerson; mutates `graph` into the residual network."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
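# Sanity check (added illustration, not part of the original script): this is
# the classic CLRS flow network, whose maximum 0 -> 5 flow is 23. Note that
# ford_fulkerson turns `graph` into its residual network in place, hence the
# deep copy here.
import copy

assert ford_fulkerson(copy.deepcopy(graph), source, sink) == 23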
print(ford_fulkerson(graph, source, sink))
| 71 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_UpperCAmelCase : Dict = logging.get_logger(__name__)
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = ['pixel_values']
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = 1 / 2_55 , snake_case_ = True , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
super().__init__(**snake_case_ )
lowercase =size if size is not None else {'''shortest_edge''': 2_24}
lowercase =get_size_dict(snake_case_ , default_to_square=snake_case_ )
lowercase =crop_size if crop_size is not None else {'''height''': 2_56, '''width''': 2_56}
lowercase =get_size_dict(snake_case_ , param_name='''crop_size''' )
lowercase =do_resize
lowercase =size
lowercase =resample
lowercase =do_rescale
lowercase =rescale_factor
lowercase =do_center_crop
lowercase =crop_size
lowercase =do_flip_channel_order
def _A( self , snake_case_ , snake_case_ , snake_case_ = PIL.Image.BILINEAR , snake_case_ = None , **snake_case_ , ):
lowercase =get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
lowercase =get_resize_output_image_size(snake_case_ , size=size['''shortest_edge'''] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
lowercase =get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(snake_case_ , size=(size['''height'''], size['''width''']) , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ = None ):
return flip_channel_order(snake_case_ , data_format=snake_case_ )
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ):
lowercase =do_resize if do_resize is not None else self.do_resize
lowercase =resample if resample is not None else self.resample
lowercase =do_rescale if do_rescale is not None else self.do_rescale
lowercase =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase =(
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase =size if size is not None else self.size
lowercase =get_size_dict(snake_case_ , default_to_square=snake_case_ )
lowercase =crop_size if crop_size is not None else self.crop_size
lowercase =get_size_dict(snake_case_ , param_name='''crop_size''' )
lowercase =make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
lowercase =[to_numpy_array(snake_case_ ) for image in images]
if do_resize:
lowercase =[self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
lowercase =[self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
lowercase =[self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase =[self.flip_channel_order(image=snake_case_ ) for image in images]
lowercase =[to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
lowercase ={'''pixel_values''': images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
def _A( self , snake_case_ , snake_case_ = None ):
lowercase =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(snake_case_ ):
lowercase =target_sizes.numpy()
lowercase =[]
for idx in range(len(snake_case_ ) ):
lowercase =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=snake_case_ )
lowercase =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case_ )
else:
lowercase =logits.argmax(dim=1 )
lowercase =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
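# Minimal illustration of the RGB<->BGR flip this processor applies (the
# pretrained MobileViT checkpoints expect BGR input); assumes a channels-last
# numpy array, unlike the processor's configurable data_format:
import numpy as np

rgb = np.zeros((2, 2, 3), dtype=np.uint8)
rgb[..., 0] = 255  # pure red
bgr = rgb[..., ::-1]  # reverse the channel axis
assert bgr[0, 0, 2] == 255  # red ends up in the last channel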
| 72 |
"""simple docstring"""
def is_automorphic_number(number):
    """Return True if the square of `number` ends with `number` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
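# Quick checks with known automorphic numbers (the function name above is
# introduced by this cleanup; the algorithm itself is unchanged):
# 5*5 = 25, 6*6 = 36, 76*76 = 5776, 625*625 = 390625.
assert is_automorphic_number(76)
assert is_automorphic_number(625)
assert not is_automorphic_number(7)  # 49 does not end in 7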
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable helper: largest divisor of seq_length below window_size, plus the block count."""
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
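# Worked example of expand_attention_types_params above: the default
# attention_types value expands to 24 layer types alternating between
# global and local attention.
layer_types = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
assert len(layer_types) == 24
assert layer_types[:4] == ["global", "local", "global", "local"]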
| 73 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )
            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
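# Minimal torch-free analogue (a sketch, not the pipeline API) of the box
# post-processing in _get_bounding_box above:
def bounding_box_dict(box):
    xmin, ymin, xmax, ymax = (int(v) for v in box)
    return {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}

assert bounding_box_dict([1.0, 2.0, 3.0, 4.0]) == {"xmin": 1, "ymin": 2, "xmax": 3, "ymax": 4}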
| 657 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def _should_continue(line, indent):
    """Whether `line` still belongs to the indented block being collected."""
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers library."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase_ = re.compile(R"""<FILL\s+[^>]*>""")
def get_indent(code):
    """Return the indentation of the first non-empty line in `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting to `code`, preserving whether it was indented."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content if `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """Check every `# Copied from` block in the repository, optionally fixing mismatches in place."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
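# Example of the comment convention this script enforces (the concrete class
# pairing below is hypothetical): the body under the marker is diffed against
# the source of diffusers.schedulers.scheduling_ddpm.DDPMScheduler after
# applying the DDPM->DDIM rename, and `make fix-copies` rewrites it on drift.
#
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler with DDPM->DDIM
#     class DDIMScheduler:
#         ...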
| 74 |
"""simple docstring"""
def merge_sort(collection):
    """Pure-Python top-down merge sort; returns the sorted list."""
    def merge(left, right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
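# Quick checks; slicing copies the input, so the caller's list is not mutated.
assert merge_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert merge_sort([]) == []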
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCamelCase_ :
def __init__( self : Any , _A : int , ):
'''simple docstring'''
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : str = 13
UpperCAmelCase__ : int = 7
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Dict = 99
UpperCAmelCase__ : List[Any] = 32
UpperCAmelCase__ : List[str] = 2
UpperCAmelCase__ : Any = 4
UpperCAmelCase__ : str = 37
UpperCAmelCase__ : Any = '''gelu'''
UpperCAmelCase__ : Optional[int] = 0.1
UpperCAmelCase__ : Dict = 0.1
UpperCAmelCase__ : Tuple = 512
UpperCAmelCase__ : str = 16
UpperCAmelCase__ : List[Any] = 2
UpperCAmelCase__ : Union[str, Any] = 0.0_2
UpperCAmelCase__ : Union[str, Any] = 3
UpperCAmelCase__ : Any = 4
UpperCAmelCase__ : Any = None
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Any = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Dict = self.prepare_config_and_inputs()
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase_ ( self : Optional[Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : int , _A : int , _A : Union[str, Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFEsmModel(config=_A )
UpperCAmelCase__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
UpperCAmelCase__ : str = model(_A )
UpperCAmelCase__ : str = [input_ids, input_mask]
UpperCAmelCase__ : List[Any] = model(_A )
UpperCAmelCase__ : Optional[int] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : List[str] , _A : str , _A : Tuple , _A : Any , _A : Any , _A : Union[str, Any] , _A : int , _A : Optional[Any] , _A : str , ):
'''simple docstring'''
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Any = TFEsmModel(config=_A )
UpperCAmelCase__ : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
UpperCAmelCase__ : List[str] = model(_A )
UpperCAmelCase__ : Union[str, Any] = [input_ids, input_mask]
UpperCAmelCase__ : List[str] = model(_A , encoder_hidden_states=_A )
# Also check the case where encoder outputs are not passed
UpperCAmelCase__ : str = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : List[str] , _A : int , _A : Tuple , _A : List[Any] , _A : Dict , _A : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFEsmForMaskedLM(config=_A )
UpperCAmelCase__ : str = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : int , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : str , _A : Union[str, Any] , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.num_labels
UpperCAmelCase__ : Any = TFEsmForTokenClassification(config=_A )
UpperCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
UpperCAmelCase__ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = TFEsmModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = TFEsmModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(_A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
UpperCAmelCase__ : List[Any] = model.get_bias()
assert isinstance(_A , _A )
for k, v in name.items():
assert isinstance(_A , tf.Variable )
else:
UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
assert x is None
UpperCAmelCase__ : Tuple = model.get_bias()
assert name is None
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
UpperCAmelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : Optional[Any] = model(_A )[0]
UpperCAmelCase__ : List[Any] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _A )
# compare the actual values for a slice.
UpperCAmelCase__ : Dict = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
UpperCAmelCase__ : List[Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase__ : Any = model(_A )[0]
# compare the actual values for a slice.
UpperCAmelCase__ : Optional[int] = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 75 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 0 |
"""simple docstring"""
def all_unique_chars(input_str: str) -> bool:
    """Return True if no character occurs more than once in `input_str`."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
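# The integer bitmap acts as an arbitrarily wide bit set, one bit per Unicode
# code point, so this works for any characters, not just a-z (the function
# name above is introduced by this cleanup):
assert all_unique_chars("abc")
assert not all_unique_chars("abca")
assert all_unique_chars("")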
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
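# The linear-warmup values asserted above come from a lambda of roughly this
# shape (a sketch of the schedule math, not the transformers source):
def linear_warmup_lambda(step, num_warmup_steps=2, num_training_steps=10):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

# With a base lr of 10.0 this reproduces the first expected values [0.0, 5.0, 10.0, 8.75].
assert [10.0 * linear_warmup_lambda(s) for s in range(4)] == [0.0, 5.0, 10.0, 8.75]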
| 657 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each search heads towards the other search's current frontier node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_bfs_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 77 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
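
# A minimal sketch of how test_command_parser plugs into a multi-command CLI
# (hypothetical wiring; the real accelerate entry point registers several
# subcommands the same way).
def _demo_subcommand_wiring():
    root = argparse.ArgumentParser("accelerate")
    subparsers = root.add_subparsers()
    test_command_parser(subparsers)  # registers the "test" subcommand, sets func=test_command
    args = root.parse_args(["test"])
    args.func(args)  # dispatches to test_command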
| 657 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
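
# The size_divisor assertions above exercise GLPN's resize rule: height and
# width are rounded down to the nearest multiple of size_divisor before
# processing. A minimal sketch of that rule (an assumption about the
# processor's internals, shown here only to explain the modulo checks):
def _round_down_to_multiple(height, width, size_divisor=32):
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor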
| 78 |
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # There must be a sign change between a and b for a root to be bracketed
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
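
# Iteration-count note: the loop halves [a, b] on every pass, so it terminates
# after about ceil(log2((b - a) / 0.01)) iterations -- for bisection(-2, 5)
# that is ceil(log2(700)), i.e. 10 passes.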
| 657 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
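
# Usage sketch for read_txt_into_dict (hypothetical file contents): a label
# file with one token per line becomes a line-number -> label dict, e.g. a
# file containing "happy", "sad", "neutral" on separate lines yields
# {0: "happy", 1: "sad", 2: "neutral"}, which is then used as id2label below.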
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
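
# A minimal sketch of the "*" expansion performed above: the layer index is
# parsed out of the fairseq parameter name and substituted into the mapped
# key (values are illustrative only):
#
#     name = "encoder.layers.3.self_attn.k_proj.weight"
#     layer_index = name.split("self_attn.k_proj")[0].split(".")[-2]   # "3"
#     "encoder.layers.*.attention.k_proj".replace("*", layer_index)
#     # -> "encoder.layers.3.attention.k_proj"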
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=1_6000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=1_6000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
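
# Typical invocation of this conversion script (paths are placeholders):
#
#     python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path /path/to/wav2vec_small.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base \
#         --not_finetuned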
| 79 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
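
# A minimal usage sketch for the decoder above (hypothetical sizes; method
# names follow the diffusers API, where the entry point is generate_captions):
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#     features = torch.randn(2, 77, 768)   # (batch, prefix_length, prefix_inner_dim)
#     tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")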
| 657 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
def _a ( self : str ) -> List[str]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """feature_size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """sampling_rate""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """padding_value""" ) )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _a ( self : str , _lowerCAmelCase : List[Any]=False ) -> int:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : int ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = self.feat_extract_tester.seq_length_diff
__lowercase = self.feat_extract_tester.max_seq_length + pad_diff
__lowercase = self.feat_extract_tester.min_seq_length
__lowercase = self.feat_extract_tester.batch_size
__lowercase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , padding=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
__lowercase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" )[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
self.assertTrue(all(len(_lowerCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__lowercase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _a ( self : Tuple , _lowerCAmelCase : str=False ) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : Tuple ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Any , _lowerCAmelCase : str ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to smallest with np
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to middle
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , truncation=_lowerCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowercase = 12
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , )
__lowercase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowercase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__lowercase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
def _a ( self : str ) -> str:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
@require_torch
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = min(_lowerCAmelCase )
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
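
# A minimal numpy sketch of the behavior the padding tests above assert for
# padding="longest" with attention masks (hypothetical helper; the real logic
# lives inside the feature extractor's pad method):
import numpy as np


def pad_longest(batch, padding_value=0.0):
    max_len = max(len(x) for x in batch)
    padded = np.full((len(batch), max_len), padding_value, dtype=np.float32)
    mask = np.zeros((len(batch), max_len), dtype=np.int32)
    for i, x in enumerate(batch):
        padded[i, : len(x)] = x  # copy the sequence, leave the tail padded
        mask[i, : len(x)] = 1  # attention_mask sums to the original length
    return padded, mask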
| 80 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
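
# A stripped-down sketch of the lazy-import pattern used above (simplified;
# the real _LazyModule also caches modules and cooperates with TYPE_CHECKING):
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)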
| 657 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
"""simple docstring"""
def __snake_case ( self : str ) -> str:
__snake_case : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "num_attention_heads" ) )
class MobileViTModelTester:
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Tuple=13 , lowerCamelCase : str=32 , lowerCamelCase : Dict=2 , lowerCamelCase : List[str]=3 , lowerCamelCase : Any=640 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Tuple="silu" , lowerCamelCase : int=3 , lowerCamelCase : Dict=32 , lowerCamelCase : str=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Optional[int]=True , lowerCamelCase : Union[str, Any]=10 , lowerCamelCase : int=None , ) -> str:
__snake_case : Optional[Any] = parent
__snake_case : Optional[Any] = batch_size
__snake_case : Any = image_size
__snake_case : List[Any] = patch_size
__snake_case : Any = num_channels
__snake_case : Union[str, Any] = last_hidden_size
__snake_case : Any = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : Tuple = conv_kernel_size
__snake_case : Any = output_stride
__snake_case : Any = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Optional[Any] = classifier_dropout_prob
__snake_case : Union[str, Any] = use_labels
__snake_case : Optional[int] = is_training
__snake_case : Dict = num_labels
__snake_case : Any = initializer_range
__snake_case : Optional[int] = scope
def __snake_case ( self : str ) -> Union[str, Any]:
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : List[Any] = None
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __snake_case ( self : Any ) -> Union[str, Any]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __snake_case ( self : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ) -> Dict:
__snake_case : List[Any] = MobileViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[str] = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ) -> List[str]:
__snake_case : str = self.num_labels
__snake_case : List[Any] = MobileViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict ) -> Dict:
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = MobileViTForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Tuple = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self : Optional[int] ) -> List[Any]:
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = config_and_inputs
__snake_case : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : str = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Optional[Any] = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Optional[int] = False
def __snake_case ( self : Optional[int] ) -> Dict:
__snake_case : Tuple = MobileViTModelTester(self )
__snake_case : Any = MobileViTConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def __snake_case ( self : Optional[int] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def __snake_case ( self : Dict ) -> Any:
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def __snake_case ( self : Dict ) -> List[Any]:
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def __snake_case ( self : int ) -> Dict:
pass
def __snake_case ( self : int ) -> Union[str, Any]:
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = model_class(lowerCamelCase )
__snake_case : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : int = [*signature.parameters.keys()]
__snake_case : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case ( self : int ) -> Tuple:
pass
def __snake_case ( self : Any ) -> Tuple:
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : Any ) -> str:
def check_hidden_states_output(lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Any ):
__snake_case : int = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : int = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : Union[str, Any] = outputs.hidden_states
__snake_case : int = 5
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : List[Any] = 2
for i in range(len(lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : List[Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : Any ) -> Any:
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def __snake_case ( self : List[str] ) -> List[str]:
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@slow
def __snake_case ( self : List[str] ) -> Any:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = MobileViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def lowerCAmelCase_ ( ):
__snake_case : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : str ) -> Dict:
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def __snake_case ( self : Union[str, Any] ) -> List[str]:
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowerCamelCase )
__snake_case : Optional[Any] = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__snake_case : Dict = model(**lowerCamelCase )
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__snake_case : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def __snake_case ( self : str ) -> Optional[int]:
__snake_case : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__snake_case : str = model.to(lowerCamelCase )
__snake_case : int = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__snake_case : Optional[int] = prepare_img()
__snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__snake_case : List[str] = model(**lowerCamelCase )
__snake_case : Union[str, Any] = outputs.logits
# verify the logits
__snake_case : Tuple = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase )
__snake_case : Union[str, Any] = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
] , device=lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def __snake_case ( self : Union[str, Any] ) -> Optional[int]:
__snake_case : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__snake_case : Tuple = model.to(lowerCamelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__snake_case : List[Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__snake_case : Any = model(**lowerCamelCase )
__snake_case : Dict = outputs.logits.detach().cpu()
__snake_case : Any = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(50, 60)] )
__snake_case : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
__snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__snake_case : Optional[int] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
| 81 |
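# A minimal sketch, not part of the test file above, of the shape bookkeeping the
# hidden-states check relies on: MobileViT halves the spatial resolution at each
# stage. The image_size and output_stride values here are assumed examples, not
# taken from the file.
image_size = 64
output_stride = 32
divisor = 2
shapes = []
for _ in range(5):  # the test expects exactly 5 hidden states
    shapes.append((image_size // divisor, image_size // divisor))
    divisor *= 2
assert output_stride == divisor // 2  # mirrors the final assertEqual in the test
print(shapes)  # [(32, 32), (16, 16), (8, 8), (4, 4), (2, 2)]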
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 0 |
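# A quick sanity check of the configuration above using the public transformers API
# (assumes transformers is installed; the printed defaults mirror the __init__ above).
from transformers import ConvBertConfig

config = ConvBertConfig()
print(config.model_type)        # "convbert"
print(config.hidden_size)       # 768
print(config.conv_kernel_size)  # 9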
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ) -> None:
'''simple docstring'''
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 82 |
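# A sketch of the deprecation-shim pattern used above, with hypothetical class
# names: the old name keeps working but warns on construction.
import warnings

class NewImageProcessor:
    """Stand-in for the current implementation."""
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias kept for backwards compatibility."""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits a FutureWarning, then behaves like NewImageProcessor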
"""simple docstring"""
def base16_encode ( UpperCamelCase__ ):
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def base16_decode ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
    return bytes(int(UpperCamelCase__[i] + UpperCamelCase__[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
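# A round-trip check of the two helpers above (renamed base16_encode / base16_decode
# so that both definitions are callable):
print(base16_encode(b"Hello World!"))             # 48656C6C6F20576F726C6421
print(base16_decode("48656C6C6F20576F726C6421"))  # b'Hello World!'
assert base16_decode(base16_encode(b"Hello World!")) == b"Hello World!"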
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case_ ( A_ : str, A_ : str, A_ : str, A_ : PreTrainedTokenizer, A_ : int, A_ : Optional[int] = None, ):
'''simple docstring'''
_lowerCamelCase : Dict = {}
if train_file is not None:
_lowerCamelCase : Union[str, Any] = [train_file]
if eval_file is not None:
_lowerCamelCase : List[str] = [eval_file]
if test_file is not None:
_lowerCamelCase : Any = [test_file]
_lowerCamelCase : str = datasets.load_dataset('''csv''', data_files=A_ )
_lowerCamelCase : Dict = list(ds[list(files.keys() )[0]].features.keys() )
_lowerCamelCase : List[str] = features_name.pop(A_ )
_lowerCamelCase : List[str] = list(set(ds[list(files.keys() )[0]][label_name] ) )
_lowerCamelCase : Tuple = {label: i for i, label in enumerate(A_ )}
_lowerCamelCase : Optional[Any] = tokenizer.model_input_names
_lowerCamelCase : Union[str, Any] = {}
if len(A_ ) == 1:
for k in files.keys():
_lowerCamelCase : int = ds[k].map(
lambda A_ : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=A_, max_length=A_, padding='''max_length''' ), batched=A_, )
elif len(A_ ) == 2:
for k in files.keys():
_lowerCamelCase : int = ds[k].map(
lambda A_ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=A_, max_length=A_, padding='''max_length''', ), batched=A_, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_lowerCamelCase : List[Any] = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : Tuple = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_lowerCamelCase : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : Any = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_lowerCamelCase : List[str] = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : Any = labelaid[ex[label_name]]
yield (d, label)
_lowerCamelCase : str = (
tf.data.Dataset.from_generator(
A_, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_lowerCamelCase : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_lowerCamelCase : List[Any] = (
tf.data.Dataset.from_generator(
A_, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_lowerCamelCase : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_lowerCamelCase : List[Any] = (
tf.data.Dataset.from_generator(
A_, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_lowerCamelCase : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class __snake_case :
snake_case__ : int = field(metadata={"help": "Which column contains the label"})
snake_case__ : str = field(default=_lowercase , metadata={"help": "The path of the training file"})
snake_case__ : Optional[str] = field(default=_lowercase , metadata={"help": "The path of the development file"})
snake_case__ : Optional[str] = field(default=_lowercase , metadata={"help": "The path of the test file"})
snake_case__ : int = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"})
@dataclass
class __snake_case :
snake_case__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
snake_case__ : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
snake_case__ : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
snake_case__ : bool = field(default=_lowercase , metadata={"help": "Set this flag to use fast tokenization."})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case__ : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
F'''16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=A_, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
_lowerCamelCase : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(A_ ), labelaid=A_, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='''text-classification''', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
_lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('''.bin''' in model_args.model_name_or_path ), config=A_, cache_dir=model_args.cache_dir, )
def compute_metrics(A_ : EvalPrediction ) -> Dict:
_lowerCamelCase : Dict = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_lowerCamelCase : Tuple = TFTrainer(
model=A_, args=A_, train_dataset=A_, eval_dataset=A_, compute_metrics=A_, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase : Tuple = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_lowerCamelCase : List[str] = trainer.evaluate()
_lowerCamelCase : List[Any] = os.path.join(training_args.output_dir, '''eval_results.txt''' )
with open(A_, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
results.update(A_ )
return results
if __name__ == "__main__":
main()
| 83 |
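# A minimal, self-contained sketch of the tf.data pattern the get_tfds function above
# builds: yield (features_dict, label) pairs from a generator and declare their dtypes
# and shapes up front. The token ids below are made-up placeholders.
import tensorflow as tf

input_names = ["input_ids", "attention_mask"]

def gen():
    yield ({"input_ids": [101, 7592, 102], "attention_mask": [1, 1, 1]}, 0)
    yield ({"input_ids": [101, 2088, 102], "attention_mask": [1, 1, 1]}, 1)

ds = tf.data.Dataset.from_generator(
    gen,
    ({k: tf.int32 for k in input_names}, tf.int64),
    ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
)
for features, label in ds:
    print(features["input_ids"].numpy(), label.numpy())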
"""simple docstring"""
def decimal_to_fraction ( UpperCamelCase__ ):
    """simple docstring"""
    try:
        decimal = float(UpperCamelCase__ )
    except ValueError:
        raise ValueError("Please enter a valid number" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split("." )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 0 |
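# A worked example for the reduction above, using 6.25: two fractional digits give
# 625/100; Euclid's algorithm finds gcd(625, 100) = 25 (625 % 100 = 25, 100 % 25 = 0),
# so the reduced fraction is 25/4.
print(decimal_to_fraction(6.25))  # (25, 4)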
from collections.abc import Callable
import numpy as np
def UpperCAmelCase_ ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ):
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 |
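# An example run of the explicit Euler stepper above on y' = y, y(0) = 1, integrated
# to x = 1. The exact answer is e, about 2.71828; with step 0.01 Euler lands near 2.7048.
ys = UpperCAmelCase_(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1])  # ~2.7048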
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 0 |
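# The same write-a-card pattern in miniature, with hypothetical paths and fields
# rather than the script's real data:
from pathlib import Path

card_dir = Path("model_cards/example-org/example-model")
card_dir.mkdir(parents=True, exist_ok=True)
(card_dir / "README.md").write_text("---\nlicense: apache-2.0\n---\n# Example model\n", encoding="utf-8")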
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 85 |
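# A sketch of how a pin table like the one above is typically consumed to build
# install requirements (hypothetical helper and subset, not the library's own code):
deps = {"numpy": "numpy", "torch": "torch>=1.4", "transformers": "transformers>=4.25.1"}

def deps_list(*pkgs):
    return [deps[p] for p in pkgs]

print(deps_list("torch", "transformers"))  # ['torch>=1.4', 'transformers>=4.25.1']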
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 0 |
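# A sketch of what the dummy-object pattern above buys you: imports succeed even when
# the backend is missing, and a clear error is raised only on use. All names here are
# hypothetical stand-ins for the real utilities.
BACKEND_AVAILABLE = {"torch": False, "torchsde": False}

def requires_backends(obj, backends):
    missing = [b for b in backends if not BACKEND_AVAILABLE.get(b, False)]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the backends: {', '.join(missing)}")

class DummyScheduler:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

try:
    DummyScheduler()
except ImportError as err:
    print(err)  # DummyScheduler requires the backends: torch, torchsde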
def __snake_case ( __UpperCamelCase : int = 1000 ):
    """simple docstring"""
    A_ = 2**__UpperCamelCase
    sum_of_num = 0
    for digit in str(A_ ):
        sum_of_num += int(digit )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = __snake_case(power)
    print('Sum of the digits is: ', result)
| 86 |
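# A cross-check of the function above as a one-liner; the digit sum of 2**1000 is
# the well-known Project Euler problem 16 answer.
print(sum(int(digit) for digit in str(2**1000)))  # 1366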
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 | 0 |
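# A quick check of the fallback documented above: with no backbone_config given,
# UperNetConfig defaults to a ResNet backbone (assumes transformers is installed).
from transformers import UperNetConfig

config = UperNetConfig()
print(config.backbone_config.model_type)  # "resnet"
print(config.to_dict()["model_type"])     # "upernet"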
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    """simple docstring"""
    if not isinstance(lowercase_ , int ):
        raise TypeError('''Input value must be an \'int\' type''' )
    A__ = 0
    while lowercase_:
        A__ += 1
        lowercase_ >>= 1
    return A__
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 |
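# For positive ints the helper above agrees with int.bit_length(); it returns the
# 1-based index of the most significant set bit.
for n in (1, 2, 5, 17, 1024):
    assert SCREAMING_SNAKE_CASE(n) == n.bit_length()
print(SCREAMING_SNAKE_CASE(1024))  # 11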
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 0 |
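# A sketch of the lazy-import idea behind _LazyModule, using PEP 562 module
# __getattr__; this illustrates the mechanism inside a package __init__.py, not the
# actual transformers implementation. The submodule is imported only when one of its
# exports is first accessed.
import importlib

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

def __getattr__(name):
    for module_name, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")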
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class lowercase__ :
__UpperCAmelCase = field(
default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} )
__UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} )
__UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} )
__UpperCAmelCase = field(
default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} ,)
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} ,)
def UpperCamelCase_ ( self) -> Any:
_lowerCamelCase : Any = {}
if self.train_dir is not None:
_lowerCamelCase : int = self.train_dir
if self.validation_dir is not None:
_lowerCamelCase : Tuple = self.validation_dir
_lowerCamelCase : Optional[int] = data_files if data_files else None
@dataclass
class lowercase__ :
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} ,)
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} ,)
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
__UpperCAmelCase = field(
default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,)
__UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} ,)
__UpperCAmelCase = field(
default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class lowercase__ ( A_ ):
__UpperCAmelCase = field(
default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def _snake_case ( __snake_case : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCamelCase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_lowerCamelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
_lowerCamelCase : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0:
_lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split )
_lowerCamelCase : Union[str, Any] = split["""train"""]
_lowerCamelCase : Optional[int] = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : str = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
_lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case )
elif model_args.model_name_or_path:
_lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
_lowerCamelCase : Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case )
elif model_args.model_name_or_path:
_lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
_lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case )
if training_args.do_train:
_lowerCamelCase : List[Any] = ds["""train"""].column_names
else:
_lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
_lowerCamelCase : str = data_args.image_column_name
elif "image" in column_names:
_lowerCamelCase : Optional[Any] = """image"""
elif "img" in column_names:
_lowerCamelCase : List[Any] = """img"""
else:
_lowerCamelCase : str = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_lowerCamelCase : Dict = image_processor.size["""shortest_edge"""]
else:
_lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""])
_lowerCamelCase : Tuple = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(examples ):
    examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
    return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_lowerCamelCase : Union[str, Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__snake_case )
# Compute absolute learning rate
_lowerCamelCase : Optional[Any] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
_lowerCamelCase : Optional[Any] = Trainer(
model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
_lowerCamelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_lowerCamelCase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCamelCase : Union[str, Any] = last_checkpoint
_lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowerCamelCase : int = trainer.evaluate()
trainer.log_metrics("""eval""" , __snake_case )
trainer.save_metrics("""eval""" , __snake_case )
# Write model card and (optionally) push to hub
_lowerCamelCase : Optional[Any] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def _snake_case ( __snake_case : Dict ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 88 |
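# The learning-rate scaling rule used above, spelled out with assumed example values:
# absolute_lr = base_lr * total_batch_size / 256.
train_batch_size = 64
gradient_accumulation_steps = 2
world_size = 4
base_learning_rate = 1e-3
total_train_batch_size = train_batch_size * gradient_accumulation_steps * world_size
print(base_learning_rate * total_train_batch_size / 256)  # 0.002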
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 657 | 0 |
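# Why the integration test above expects a hidden-state shape of (1, 199, 768): with a
# 224x224 input and 16x16 patches the visual stream contributes (224 // 16) ** 2 + 1
# = 197 positions, and the two text tokens fed in bring the total to 199.
image_size, patch_size, text_len = 224, 16, 2
print((image_size // patch_size) ** 2 + 1 + text_len)  # 199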
SCREAMING_SNAKE_CASE : Any = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
SCREAMING_SNAKE_CASE : List[str] = [{"type": "code", "content": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE : int = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 89 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> List[Any]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_attention_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_choices
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_attention_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = True
lowerCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = True
lowercase__ : str = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = FlaxBertModelTester(self )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> str:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
lowerCAmelCase__ = FlaxBertModel.from_pretrained('''bert-base-cased''' )
lowerCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ ) | 90 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 0 |
"""simple docstring"""
import random
class lowerCAmelCase_ :
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : str ) -> tuple[list[int], list[int]]:
A = [ord(A_ ) for i in text]
A = []
A = []
for i in plain:
A = random.randint(1 ,300 )
A = (i + k) * k
cipher.append(A_ )
key.append(A_ )
return cipher, key
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : list[int] ,A_ : list[int] ) -> str:
A = []
for i in range(len(A_ ) ):
A = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(A_ ) )
return "".join(A_ )
if __name__ == "__main__":
_lowercase , _lowercase = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k)) | 91 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 657 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCamelCase_ = ["""small""", """medium""", """large"""]
UpperCamelCase_ = """lm_head.decoder.weight"""
UpperCamelCase_ = """lm_head.weight"""
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> List[str]:
lowercase : List[str] =torch.load(__magic_name__ )
lowercase : List[str] =d.pop(__magic_name__ )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
torch.save(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
UpperCamelCase_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
UpperCamelCase_ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
UpperCamelCase_ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 92 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list:
"""simple docstring"""
lowerCAmelCase__ :Tuple = word.split()
def justify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
lowerCAmelCase__ :str = max_width - width
lowerCAmelCase__ :Optional[int] = len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ :int = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ :List[str] = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ :Tuple = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_SCREAMING_SNAKE_CASE ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ :Union[str, Any] = []
for i in range(_SCREAMING_SNAKE_CASE ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = []
lowerCAmelCase__ :list[str] = []
lowerCAmelCase__ :List[Any] = 0
for word in words:
if width + len(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_SCREAMING_SNAKE_CASE )
width += len(_SCREAMING_SNAKE_CASE )
else:
# justify the line and add it to result
answer.append(justify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# reset new line and new width
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = [word], len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = max_width - width - len(_SCREAMING_SNAKE_CASE )
answer.append(' '.join(_SCREAMING_SNAKE_CASE ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ (__A , unittest.TestCase ):
__magic_name__ = ConsistencyModelPipeline
__magic_name__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__magic_name__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__magic_name__ = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
UpperCAmelCase_ : Union[str, Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[Any]=False ) -> List[Any]:
if class_cond:
UpperCAmelCase_ : Union[str, Any] = self.dummy_cond_unet
else:
UpperCAmelCase_ : Any = self.dummy_uncond_unet
# Default to CM multistep sampler
UpperCAmelCase_ : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCAmelCase_ : Any = {
"unet": unet,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any=0 ) -> List[Any]:
if str(lowerCAmelCase_ ).startswith("mps" ):
UpperCAmelCase_ : Any = torch.manual_seed(lowerCAmelCase_ )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : List[str] = ConsistencyModelPipeline(**lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ : Any = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[int] = self.get_dummy_components(class_cond=lowerCAmelCase_ )
UpperCAmelCase_ : str = ConsistencyModelPipeline(**lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : int = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : Tuple = ConsistencyModelPipeline(**lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Tuple = self.get_dummy_components(class_cond=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = ConsistencyModelPipeline(**lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : int = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : int = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            inputs["latents"] = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
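# Illustrative standalone sketch (not part of the test class above): the one-step
# sampling path the onestep tests exercise, with the same checkpoint and scheduler
# settings. Assumes this module's imports (UNetaDModel, CMStochasticIterativeScheduler,
# ConsistencyModelPipeline) and an available CUDA device.
if __name__ == "__main__":
    unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
    image = pipe(num_inference_steps=1, class_labels=0, generator=torch.manual_seed(0), output_type="np").images
    print(image.shape)  # (1, 64, 64, 3)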
| 95 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
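# A minimal usage sketch (illustrative; the checkpoint name is an assumption): the
# `pipeline` factory wires preprocess/_forward/postprocess together for a standard
# object-detection model.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
    # Each entry looks like {"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
    print(predictions)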
| 657 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
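# A minimal usage sketch (illustrative; the checkpoint name is an assumption): the
# high-level `pipeline` factory returns this class for the "image-to-text" task.
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
    # e.g. [{'generated_text': 'two birds are standing next to each other'}]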
| 96 |
"""simple docstring"""
def merge_sort(collection):
    """simple docstring"""

    def merge(left, right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
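# Quick sanity checks (illustrative additions, run on import): ascending output and
# the empty-input base case.
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []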
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 657 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    '''simple docstring'''
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    '''simple docstring'''
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
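# Illustrative trace (assuming words.txt contains these entries): signature("listen")
# == signature("silent") == "eilnst", so both words land in the same
# word_by_signature bucket and report each other as anagrams.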
| 97 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
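# Illustrative sketch of the generation path checked above (tokenizer handling is an
# assumption for readability; do_sample=False reproduces the greedy continuation):
if __name__ == "__main__":
    from transformers import OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    input_ids = tokenizer("the president is", return_tensors="pt").input_ids  # [[481, 4735, 544]]
    output_ids = model.generate(input_ids, do_sample=False)
    print(tokenizer.decode(output_ids[0]))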
| 657 | 0 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = f1_score(y_true=question_labels, y_pred=question_preds, average='macro')
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(f1_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        '''simple docstring'''
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg='macro')
        elif self.config_name == "record":
            true_dict = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            preds_dict = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(true_dict, preds_dict)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
| 98 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """simple docstring"""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
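# Minimal standalone sketch (illustrative, toy model): the warmup-then-linear-decay
# schedule exercised above ramps the lr from 0 to the base lr over num_warmup_steps,
# then decays it linearly to 0 at num_training_steps. Assumes torch is available.
if __name__ == "__main__":
    toy_model = nn.Linear(50, 50)
    toy_optimizer = AdamW(toy_model.parameters(), lr=10.0)
    toy_scheduler = get_linear_schedule_with_warmup(toy_optimizer, num_warmup_steps=2, num_training_steps=10)
    print(unwrap_schedule(toy_scheduler, num_steps=10))
    # approximately [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]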
| 657 | 0 |
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
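# Worked check (illustrative): with capacity w = 6, weights [4, 3, 2, 3] and values
# [3, 2, 4, 4], the optimal subset is items 3 and 4 (1-indexed): weight 2 + 3 = 5 <= 6
# and value 4 + 4 = 8, matching the asserts in the demo block below.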
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
| 99 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
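# Typical invocation (illustrative): from a shell, `accelerate test` dispatches here,
# e.g. `accelerate test --config_file path/to/default_config.yaml`, which runs the
# bundled test_script.py through accelerate-launch with that config.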
| 657 | 0 |