code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> names it exports. _LazyModule (bottom of file) uses this
# mapping to import submodules lazily, so heavy backends load only when used.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

# Each optional backend adds its exports only when the dependency is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the lazy module is used.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf YAML config from *config_path*; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a YAML config, load checkpoint weights, move to *device*."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints wrap the weights in a "state_dict" key.
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)  # strict=True: fail loudly on key mismatch
    model.to(device)
    del sd  # free the raw state dict before returning
    return model
def reconstruct_with_vqgan(x, model):
    """Encode *x* with the VQGAN, print the latent shape, and decode it back."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like "pkg.mod.Cls" to the named object.

    When *reload* is True the containing module is re-imported first.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Instantiate the class named by config["target"] with config["params"] kwargs."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from *config*, optionally load state dict / move to GPU / eval."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load the checkpoint at *ckpt* (or a fresh model when falsy) and its global step."""
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 274
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """Holds the knobs used to build a LevitImageProcessor in the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # noqa: B006 - kept mutable defaults for interface compat
        image_std=[0.5, 0.5, 0.5],  # noqa: B006
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Integration tests for LevitImageProcessor with PIL, numpy and torch inputs."""

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        # Overrides passed to from_dict must win over the serialized values.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def _assert_encoded_shapes(self, image_processing, image_inputs):
        # Shared shape checks: single image, then a full batch.
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        self._assert_encoded_shapes(image_processing, image_inputs)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        self._assert_encoded_shapes(image_processing, image_inputs)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        self._assert_encoded_shapes(image_processing, image_inputs)
| 714
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Pretrained checkpoint id -> config URL, used by the hub resolution machinery.
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a MobileNetV1 model."""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # A non-positive multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when comparing ONNX vs. reference outputs.
        return 1e-4
| 187
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Pretrained checkpoint id -> config URL.
DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class storing the hyper-parameters of a Dinat model."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],  # noqa: B006 - kept mutable defaults for interface compat
        num_heads=[2, 4, 8, 16],  # noqa: B006
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],  # noqa: B006
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 302
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Immutable scheduler state threaded through the (functional) JAX sampling loop."""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        """Return a fresh, empty state."""
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a scheduler step: new sample, the derivative used, and updated state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax implementation of the stochastic sampler from Karras et al. (2022)."""

    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All parameters are captured by @register_to_config; nothing else to do.
        pass

    def create_state(self):
        """Return an empty scheduler state."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        """Return a new state with timesteps and the sigma schedule filled in.

        *shape* is accepted for interface compatibility but unused here.
        """
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        """Explode the sample to a higher noise level sigma_hat; returns (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        """First-order (Euler) step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state,
        model_output,
        sigma_hat,
        sigma_prev,
        sample_hat,
        sample_prev,
        derivative,
        return_dict=True,
    ):
        """Second-order correction (Heun) combining the two derivative estimates."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        # Training-time noising is not defined for this scheduler.
        raise NotImplementedError()
| 302
| 1
|
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first character of *sentence* if it is a lowercase ASCII letter.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 373
|
import datasets
from .evaluate import evaluate
# BibTeX citation attached to the metric's MetricInfo.
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''

# Short human-readable description of the metric.
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''

# Documentation of the compute() inputs/outputs, shown in the metric docstring.
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    """Official SQuAD v1 metric: exact match and token-level F1."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        # The official script expects {question_id: prediction_text}.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Re-wrap the references into the nested SQuAD dataset layout.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 373
| 1
|
"""simple docstring"""
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
UpperCAmelCase__ : Optional[int] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : int = 0
while b > 0:
if b & 1:
UpperCAmelCase__ : Any = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 182
|
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 182
| 1
|
import enum
import os
from hashlib import sha256  # was "shaaaa" (nonexistent) — sha256 is what the checksum code uses
from typing import Optional

from .. import config
from .logging import get_logger

# Module-level logger shared by the verification helpers below.
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """How much verification to run on downloaded/generated data."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Base error for checksum verification failures."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not present in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum does not match the expected value."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded file checksums against the expected ones.

    Raises ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile on key mismatch
    and NonMatchingChecksumError when any checksum differs. A None expectation
    means verification is skipped (logged, not an error).
    """
    if expected_checksums is None:
        logger.info("""Unable to verify checksums.""")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = """ for """ + verification_name if verification_name is not None else """"""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            """Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"""
        )
    logger.info("""All the checksums matched successfully""" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error for split-size verification failures."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not present in the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A split's number of examples does not match the expected value."""
def verify_splits(expected_splits, recorded_splits):
    """Check that recorded splits match the expected ones in names and sizes.

    Raises ExpectedMoreSplits / UnexpectedSplits on key mismatch and
    NonMatchingSplitsSizesError when example counts differ. A None expectation
    means verification is skipped (logged, not an error).
    """
    if expected_splits is None:
        logger.info("""Unable to verify splits sizes.""")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("""All the splits matched successfully.""")
def get_size_checksum_dict(path, record_checksum=True):
    """Return {"num_bytes": ..., "checksum": ...} for the file at *path*.

    The checksum is the hex SHA-256 of the file contents, computed in 1 MiB
    chunks; it is None when *record_checksum* is False.
    """
    if record_checksum:
        from hashlib import sha256  # local import keeps this helper self-contained

        m = sha256()
        with open(path, """rb""") as f:
            # Stream in 1 MiB chunks to keep memory bounded on large files.
            for chunk in iter(lambda: f.read(1 << 20), b""""""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """True iff *dataset_size* is truthy and below config.IN_MEMORY_MAX_SIZE.

    Returns False when either value is falsy (unknown size or limit disabled).
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 706
|
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> list[list[int | str]]:
    """Print every permutation of *sequence* and return them as a list.

    Returning the permutations is a backward-compatible addition (the original
    returned None); printing behavior is preserved.
    """
    results: list[list[int | str]] = []
    create_state_space_tree(sequence, [], 0, [0 for _ in range(len(sequence))], results)
    return results


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
    results: list[list[int | str]] | None = None,
) -> None:
    """Backtracking DFS: extend current_sequence with each not-yet-used element."""
    if index == len(sequence):
        print(current_sequence)
        if results is not None:
            results.append(list(current_sequence))
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used, results)
            # Undo the choice before trying the next candidate.
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 450
| 0
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)

# PyTorch ControlNet pipelines need both transformers and torch; when either is
# missing, fall back to dummy objects that raise an informative error on use.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline has no dummy fallback: it is exported only when available.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 40
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Submodule name -> names it exports; consumed by _LazyModule at the bottom.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

# Each optional backend adds its exports only when the dependency is installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the lazy module is used.
if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a = _symbol_database.Default()
a = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# 
\x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 
\x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
# Build the message/enum classes from DESCRIPTOR (created above via
# AddSerializedFile) and inject them into this module's namespace.
# NOTE(review): the dump assigned every target here to the throwaway name `a`
# while the following lines reference `_globals`/`DESCRIPTOR`; restored the
# standard protoc-generated form. Message-name keys follow the serialized
# schema above (TrainerSpec, NormalizerSpec, SelfTestData, ModelProto);
# offsets are kept from the dump.
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    # Pure-Python descriptors: re-apply serialized options and the byte
    # offsets of each message inside the serialized file descriptor.
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 713
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__(BaseImageProcessor):
    r"""
    Image processor that resizes, center-crops, rescales and normalizes images.

    Defaults (shortest-edge-224 resize with a 256/224 up-scaling factor, a
    224x224 center crop, 1/255 rescaling and ImageNet-default mean/std) match a
    LeViT-style preprocessing pipeline.

    NOTE(review): the dump defined every parameter and every method under a
    single mangled name (a SyntaxError / silent shadowing), while the bodies
    referenced the real names (`self.resize`, `size_dict`, `images`, ...).
    Names are restored from those internal references; the base class is
    restored from the otherwise-unused `BaseImageProcessor` import.
    """

    # Name of the tensor this processor produces for the model
    # (BaseImageProcessor convention — TODO confirm against upstream).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        """Store the preprocessing configuration; see `preprocess` for each flag."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # default_to_square=False keeps a bare int `size` meaning "shortest edge"
        # (matches the shortest_edge handling in `resize` — TODO confirm).
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize `image`. If `size` has a `"shortest_edge"` key, the shortest edge
        is resized to `int(256 / 224 * size["shortest_edge"])` preserving aspect
        ratio; otherwise `size` must provide explicit `"height"` and `"width"`.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            # Up-scale by 256/224 so a subsequent 224 center-crop has a margin.
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image,
            size=(size_dict["height"], size_dict["width"]),
            resample=resample,
            data_format=data_format,
            **kwargs,
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size["height"] x size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """
        Apply the configured transforms (resize -> center-crop -> rescale ->
        normalize) to one image or a batch, returning a `BatchFeature` with a
        single `"pixel_values"` entry. Per-call arguments override the values
        stored at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 650
| 0
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A_:
    """
    Mixin for diffusers UNet block tests. Concrete subclasses are expected to
    provide `block_class` and `block_type` ("down" / "mid" / "up").

    NOTE(review): the dump defined every method as `_lowercase` (so only the
    last survived) and every parameter as `_A` (a SyntaxError); names are
    restored from the internal references (`self.get_dummy_input`,
    `self.output_shape`, `self.prepare_init_args_and_inputs_for_common`).
    """

    @property
    def dummy_input(self):
        """Default dummy input (hidden states + time embedding)."""
        return self.get_dummy_input()

    @property
    def output_shape(self):
        """Expected output shape for the configured block type."""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a deterministic (seeded) kwargs dict for calling the block."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            # Separate seed so the residual states differ from hidden_states.
            res_generator = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=res_generator, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor((batch_size, 3) + sizes, generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """Return (init kwargs, call kwargs) for the block under test."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # Up blocks additionally take the previous stage's channel count
            # (key restored from the diffusers up-block signature — TODO confirm).
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Run the block and compare shape plus a 3x3 output slice."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        """Smoke-test a forward + MSE-loss backward pass."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 130
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Shared RNG used when callers do not supply one; the dump bound it to `__A`
# while the function below references `global_rng`.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    Args:
        shape: 2-tuple ``(batch, length)``.
        scale: upper bound multiplier for each value.
        rng: optional ``random.Random``; falls back to the module-level RNG.
        name: unused; kept for signature compatibility with callers.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase):
    """Helper that builds feature-extractor config dicts and dummy speech inputs.

    NOTE(review): the dump named this class ``A_`` (colliding with the test
    class below) while line "UpperCAmelCase = WavaVecaFeatureExtractionTester(self)"
    instantiates it under this name; the constructor also declared every
    parameter as ``_A`` (a SyntaxError). Names restored from those references.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive sequence lengths so the batch spans
        # [min_seq_length, max_seq_length] evenly.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build dummy speech inputs; lengths increase unless `equal_length`."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Unit tests for the Wav2Vec2 feature extractor.

    NOTE(review): the dump named this class ``A_`` (shadowing the tester class
    above), used an undefined base ``a_`` (while SequenceFeatureExtractionTestMixin
    was imported and unused), and mangled every method to ``_lowercase`` so only
    one would run. Names restored from the internal references
    (``self.feature_extraction_class``, ``self.feat_extract_tester``,
    ``self._check_zero_mean_unit_variance``).
    """

    feature_extraction_class = WavaVecaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        """Assert the (normalized) vector has ~0 mean and ~unit variance."""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        """Encoding lists of floats and numpy arrays must agree, batched or not."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        """Normalization applies per valid region; padded tails stay ~zero."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            # NOTE(review): the dump checked input_values[0] here, which
            # re-tested the first sequence; the padded tail of sequence 1 is
            # what this assertion is about.
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        """Same normalization check without return_tensors (plain lists)."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        """Truncation to max_length keeps each (truncated) sequence normalized."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        """padding='longest' pads to min(max_length, longest input)."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        """float64 inputs must be down-cast to float32 by `pad`."""
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = WavaVecaConfig.from_pretrained(model_id)
            feat_extract = WavaVecaFeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 130
| 1
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A = """▁"""
__A = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Dict = BigBirdTokenizer
__magic_name__ :List[Any] = BigBirdTokenizerFast
__magic_name__ :Optional[int] = True
__magic_name__ :str = True
def snake_case ( self ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ :List[Any] = self.tokenizer_class(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = '<s>'
lowerCAmelCase__ :Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_4 )
def snake_case ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def snake_case ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ :int = self.get_tokenizer()
lowerCAmelCase__ :Dict = self.get_rust_tokenizer()
lowerCAmelCase__ :Any = 'I was born in 92000, and this is falsé.'
lowerCAmelCase__ :str = tokenizer.tokenize(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = self.get_rust_tokenizer()
lowerCAmelCase__ :Optional[int] = tokenizer.encode(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = BigBirdTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCAmelCase__ :Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase__ :List[str] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCAmelCase__ :List[str] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def snake_case ( self ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = 'Hello World!'
lowerCAmelCase__ :Union[str, Any] = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
lowerCAmelCase__ :List[str] = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def snake_case ( self ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCAmelCase__ :int = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCAmelCase__ :Dict = ' '.join(__UpperCAmelCase )
lowerCAmelCase__ :Dict = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='pt' , return_token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ :int = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = BigBirdConfig(attention_type='original_full' )
lowerCAmelCase__ :str = BigBirdModel(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
lowerCAmelCase__ :Any = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
    @slow
    def snake_case ( self ):
        """Integration test: compare tokenizer output against an encoding
        pinned to a fixed model revision.

        NOTE(review): the expected encoding is passed as `__UpperCAmelCase`,
        which looks machine-mangled — presumably it should be the dict bound
        on the line below; verify against the upstream BigBird test.
        """
        # Expected encoding for three reference sentences (ids padded with 0,
        # attention masks padded with 0 accordingly).
        lowerCAmelCase__ :Optional[int] = {'input_ids': [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # Pin to a fixed revision so vocab changes upstream can't break this.
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
| 716
|
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
# Module-level logger for the benchmark argument utilities below.
__A = logging.get_logger(__name__)
def __A (default=None , metadata=None ) ->List[Any]:
    """Shortcut for declaring a dataclass list field with a fresh default.

    The original declared both parameters with the same mangled identifier
    (a SyntaxError); restored as (default, metadata) following the upstream
    `list_field` helper.

    Args:
        default: list used as the per-instance default value.
        metadata: mapping forwarded to `dataclasses.field` (e.g. help text).

    Returns:
        A `dataclasses.Field` descriptor (annotation kept as in the source).
    """
    # default_factory wraps `default` so every dataclass instance gets its
    # own reference rather than one shared mutable default.
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class _lowerCAmelCase :
    """Command-line arguments for the (deprecated) HF benchmarking utilities.

    NOTE(review): identifiers are machine-mangled — every field is declared
    under the same name `__magic_name__` (so only the last survives) and the
    bare `a` used as a field default is never defined in this file. Restore
    field names/defaults from the upstream `BenchmarkArguments` before use.
    """

    # Model checkpoints to benchmark (empty -> all base models).
    __magic_name__ :List[str] = list_field(
        default=[] , metadata={
            """help""": (
                """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
                """ of all available models"""
            )
        } , )

    # Batch sizes / sequence lengths to sweep over.
    __magic_name__ :List[int] = list_field(
        default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )

    __magic_name__ :List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )

    # Feature toggles (inference/cuda/tpu/fp16/training/tracing/...).
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )

    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )

    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )

    __magic_name__ :bool = field(default=a , metadata={"""help""": """Use FP16 to accelerate inference."""} )

    __magic_name__ :bool = field(default=a , metadata={"""help""": """Benchmark training of model"""} )

    __magic_name__ :bool = field(default=a , metadata={"""help""": """Verbose memory tracing"""} )

    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )

    __magic_name__ :bool = field(
        default=a , metadata={
            """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
        } , )

    __magic_name__ :bool = field(default=a , metadata={"""help""": """Trace memory line by line"""} )

    __magic_name__ :bool = field(default=a , metadata={"""help""": """Save result to a CSV file"""} )

    __magic_name__ :bool = field(default=a , metadata={"""help""": """Save all print statements in a log file"""} )

    __magic_name__ :bool = field(default=a , metadata={"""help""": """Whether to print environment information"""} )

    __magic_name__ :bool = field(
        default=a , metadata={
            """help""": (
                """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
                """ multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
                """ for debugging / testing and on TPU."""
            )
        } , )

    # CSV/log filenames. NOTE: the f-string defaults are evaluated once at
    # import time, so all instances share the same timestamped names.
    __magic_name__ :str = field(
        default=f"""inference_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )

    __magic_name__ :str = field(
        default=f"""inference_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )

    __magic_name__ :str = field(
        default=f"""train_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )

    __magic_name__ :str = field(
        default=f"""train_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )

    __magic_name__ :str = field(
        default=f"""env_info_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving environment information."""} , )

    __magic_name__ :str = field(
        default=f"""log_{round(time() )}.csv""" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )

    __magic_name__ :int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )

    __magic_name__ :bool = field(
        default=a , metadata={
            """help""": (
                """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
                """ model weights."""
            )
        } , )

    def snake_case ( self ):
        """Emit the deprecation warning for these benchmarking utilities."""
        warnings.warn(
            F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            ' are deprecated in general and it is advised to use external Benchmarking libraries '
            ' to benchmark Transformer models.' , __UpperCAmelCase , )

    def snake_case ( self ):
        """Serialize all arguments to a pretty-printed JSON string."""
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def snake_case ( self ):
        """Return the configured model names, requiring at least one."""
        if len(self.models ) <= 0:
            raise ValueError(
                'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
                ' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
        return self.models

    @property
    def snake_case ( self ):
        """Whether multiprocessing should actually be used (never on TPU)."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            # Forking/spawning measurement processes is unsupported on TPU.
            logger.info('Multiprocessing is currently not possible on TPU.' )
            return False
        else:
            return True
| 560
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class lowercase ( lowercase_ , unittest.TestCase ):
    """Tokenizer test-suite for BARThez (slow + fast tokenizers).

    NOTE(review): local names are machine-mangled — each `snake_case_ = ...`
    assignment is followed by references to the intended readable name
    (`tokenizer`, `vocab_keys`, `batch`, ...), which are unbound as written.
    Restore from the upstream Barthez tokenization test before running.
    """

    # TokenizerTesterMixin configuration.
    __SCREAMING_SNAKE_CASE : List[str] = BarthezTokenizer
    __SCREAMING_SNAKE_CASE : str = BarthezTokenizerFast
    __SCREAMING_SNAKE_CASE : Dict = True
    __SCREAMING_SNAKE_CASE : Dict = True

    def a ( self ):
        """Download the pretrained fast tokenizer and cache it in tmpdir."""
        super().setUp()
        snake_case_ = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case )
        snake_case_ = tokenizer

    def a ( self ):
        """<pad> must map to id 1 and back."""
        snake_case_ = '<pad>'
        snake_case_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )

    def a ( self ):
        """Check first/last vocab entries and total vocab size."""
        snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(snake_case ) , 10_1122 )

    def a ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )

    @require_torch
    def a ( self ):
        """Batch-encode to PyTorch tensors and verify shapes plus ids."""
        snake_case_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        snake_case_ = [0, 57, 3018, 7_0307, 91, 2]
        snake_case_ = self.tokenizer(
            snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' )
        self.assertIsInstance(snake_case , snake_case )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        snake_case_ = batch.input_ids.tolist()[0]
        self.assertListEqual(snake_case , snake_case )

    def a ( self ):
        """Slow and fast tokenizers must agree on tokenize/encode output."""
        if not self.test_rust_tokenizer:
            return
        snake_case_ = self.get_tokenizer()
        snake_case_ = self.get_rust_tokenizer()
        snake_case_ = 'I was born in 92000, and this is falsé.'
        snake_case_ = tokenizer.tokenize(snake_case )
        snake_case_ = rust_tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )
        snake_case_ = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        snake_case_ = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
        self.assertListEqual(snake_case , snake_case )
        snake_case_ = self.get_rust_tokenizer()
        snake_case_ = tokenizer.encode(snake_case )
        snake_case_ = rust_tokenizer.encode(snake_case )
        self.assertListEqual(snake_case , snake_case )

    @slow
    def a ( self ):
        """Integration test against an encoding pinned to a fixed revision."""
        # fmt: off
        snake_case_ = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        snake_case_ = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 362
|
_UpperCAmelCase : Any = 6_5521
# Alias restored: the function below refers to the checksum modulus by its
# conventional name (65521 = largest prime below 2**16, per Adler-32).
MOD_ADLER = _UpperCAmelCase
def __lowerCamelCase ( UpperCamelCase__ ):
    """Compute the Adler-32 checksum of the string `UpperCamelCase__`.

    The original body referenced the never-bound names `a`, `b`, `plain_text`
    and `MOD_ADLER`; restored to the standard Adler-32 recurrence (matches
    `zlib.adler32` on latin-1 text).

    Returns:
        The 32-bit checksum as an int: high 16 bits = b, low 16 bits = a.
    """
    a = 1
    b = 0
    for plain_chr in UpperCamelCase__:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 362
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
# Mapping from fairseq wav2vec2 parameter-name fragments to the corresponding
# HF Wav2Vec2 module paths; '*' is replaced by the layer index at load time.
UpperCAmelCase__ = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
# Keys that live on the model top level rather than inside the encoder stack.
UpperCAmelCase__ = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ):
    """Assign one fairseq weight tensor onto the HF model at a dotted key path.

    NOTE(review): identifiers are machine-mangled — all five parameters share
    one name (a SyntaxError as written) and the body references never-bound
    names (`key`, `hf_pointer`, `hf_shape`, `value`, `full_name`,
    `weight_type`). Restore from the upstream wav2vec2 conversion script
    (set_recursively) before running.
    """
    # Walk the dotted attribute path to reach the target parameter holder.
    for attribute in key.split(""".""" ):
        _UpperCAmelCase = getattr(lowercase ,lowercase )
    # Shape check against the incoming fairseq tensor before assignment.
    if weight_type is not None:
        _UpperCAmelCase = getattr(lowercase ,lowercase ).shape
    else:
        _UpperCAmelCase = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    # Dispatch on which sub-tensor (weight / weight_g / weight_v / bias) to set.
    if weight_type == "weight":
        _UpperCAmelCase = value
    elif weight_type == "weight_g":
        _UpperCAmelCase = value
    elif weight_type == "weight_v":
        _UpperCAmelCase = value
    elif weight_type == "bias":
        _UpperCAmelCase = value
    else:
        _UpperCAmelCase = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCAmelCase ( lowercase ,lowercase ):
    """Copy all fairseq encoder weights into the HF model; return the proj weight.

    NOTE(review): identifiers are machine-mangled — the two parameters share
    one name and the body references never-bound names (`fairseq_model`,
    `fairseq_dict`, `hf_model`, `name`, `is_used`, ...). Restore from the
    upstream `recursively_load_weights_wav2vec2` before running.
    """
    _UpperCAmelCase = []
    _UpperCAmelCase = fairseq_model.state_dict()
    _UpperCAmelCase = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    _UpperCAmelCase = None
    for name, value in fairseq_dict.items():
        _UpperCAmelCase = False
        if "conv_layers" in name:
            # Convolutional feature-extractor weights take a dedicated path.
            load_conv_layer(
                lowercase ,lowercase ,lowercase ,lowercase ,hf_model.config.feat_extract_norm == """group""" ,)
            _UpperCAmelCase = True
        elif name.split(""".""" )[0] == "proj":
            # Encoder->decoder projection is returned to the caller.
            _UpperCAmelCase = fairseq_model.proj
            _UpperCAmelCase = True
        else:
            # Otherwise translate via the MAPPING table defined above.
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    _UpperCAmelCase = True
                    if "*" in mapped_key:
                        _UpperCAmelCase = name.split(lowercase )[0].split(""".""" )[-2]
                        _UpperCAmelCase = mapped_key.replace("""*""" ,lowercase )
                    if "weight_g" in name:
                        _UpperCAmelCase = """weight_g"""
                    elif "weight_v" in name:
                        _UpperCAmelCase = """weight_v"""
                    elif "bias" in name:
                        _UpperCAmelCase = """bias"""
                    elif "weight" in name:
                        _UpperCAmelCase = """weight"""
                    else:
                        _UpperCAmelCase = None
                    set_recursively(lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
                continue
        if not is_used:
            unused_weights.append(lowercase )
    logger.warning(f'''Unused weights: {unused_weights}''' )
    return proj_weight
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ):
    """Load one fairseq conv-feature-extractor tensor into the HF extractor.

    NOTE(review): identifiers are machine-mangled — parameters share one
    name and the body references never-bound names (`full_name`, `name`,
    `items`, `type_id`, `layer_id`, `value`, `feature_extractor`,
    `use_group_norm`, `unused_weights`). Restore from the upstream
    `load_conv_layer` before running.
    """
    # fairseq names look like "conv_layers.<layer_id>.<type_id>.{weight,bias}".
    _UpperCAmelCase = full_name.split("""conv_layers.""" )[-1]
    _UpperCAmelCase = name.split(""".""" )
    _UpperCAmelCase = int(items[0] )
    _UpperCAmelCase = int(items[1] )
    # type_id 0 -> the conv itself; type_id 2 -> its (layer/group) norm.
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            _UpperCAmelCase = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            _UpperCAmelCase = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            _UpperCAmelCase = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            _UpperCAmelCase = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        # Anything else (e.g. norms on non-first layers under group norm).
        unused_weights.append(lowercase )
def __UpperCAmelCase ( lowercase ):
    """Build an `nn.Linear` lm-head equivalent to the embedding `lowercase`.

    The original mangled body read `emb.weight` (an unbound name) and passed
    the embedding module itself as the `bias=` argument; restored per the
    upstream helper: a bias-free linear layer sharing the embedding weights.

    Args:
        lowercase: an `nn.Embedding` (or any module with a 2-D `.weight`).

    Returns:
        `nn.Linear(vocab_size, emb_size, bias=False)` with weights copied in.
    """
    vocab_size , emb_size = lowercase.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    # Share the embedding matrix with the output projection (weight tying).
    lin_layer.weight.data = lowercase.weight.data
    return lin_layer
def __UpperCAmelCase ( lowercase ):
    """Build a HF-style vocab dict from a fairseq `dict.txt` file.

    The original mangled body assigned every intermediate to one placeholder
    and then referenced the unbound names `lines`, `num_words`, `vocab_dict`;
    restored. Each input line is "<token> <count>"; special tokens take ids
    0-3 and file tokens are appended from id 4 (fairseq convention).

    Args:
        lowercase: path to the fairseq dictionary file.

    Returns:
        dict mapping token -> integer id.
    """
    with open(lowercase ,"""r""" ,encoding="""utf-8""" ) as f:
        lines = f.readlines()
    # First whitespace-separated field on each line is the token itself.
    words = [line.split(""" """ )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words ,range(4 ,num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,):
    """Convert a fairseq wav2vec2+s2t checkpoint to a HF SpeechEncoderDecoderModel.

    NOTE(review): identifiers are machine-mangled — the seven parameters
    share one name (a SyntaxError as written) and the body references many
    never-bound names (`model`, `hf_wav2vec`, `hf_decoder`, `tokenizer`,
    `projection_layer`, `config`, ...). Restore from the upstream
    `convert_wav2vec2_checkpoint` before running.
    """
    # Encoder (wav2vec2) and decoder (speech_to_text_2) configs.
    _UpperCAmelCase = WavaVecaConfig.from_pretrained(lowercase )
    _UpperCAmelCase = SpeechaTextaConfig.from_pretrained(
        lowercase ,vocab_size=lowercase ,decoder_layers=lowercase ,do_stable_layer_norm=lowercase )
    _UpperCAmelCase = WavaVecaFeatureExtractor(
        feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=lowercase ,return_attention_mask=lowercase ,)
    # Load the fairseq ensemble and take the first (only) model.
    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    _UpperCAmelCase = model[0].eval()
    # set weights for wav2vec2 encoder
    _UpperCAmelCase = WavaVecaModel(lowercase )
    _UpperCAmelCase = recursively_load_weights_wavaveca(model.encoder ,lowercase )
    _UpperCAmelCase = SpeechaTextaForCausalLM(lowercase )
    _UpperCAmelCase , _UpperCAmelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=lowercase )
    # set output linear layer
    unexpected_keys.remove("""embed_out""" )
    _UpperCAmelCase = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    _UpperCAmelCase = SpeechEncoderDecoderModel(encoder=lowercase ,decoder=lowercase )
    _UpperCAmelCase = False
    # add projection layer
    _UpperCAmelCase = nn.Parameter(projection_layer.weight )
    _UpperCAmelCase = nn.Parameter(projection_layer.bias )
    # Build and persist the tokenizer vocabulary from the fairseq dict file.
    _UpperCAmelCase = create_vocab_dict(lowercase )
    with open(os.path.join(lowercase ,"""vocab.json""" ) ,"""w""" ) as fp:
        json.dump(lowercase ,lowercase )
    _UpperCAmelCase = SpeechaTextaTokenizer(os.path.join(lowercase ,"""vocab.json""" ) )
    tokenizer.save_pretrained(lowercase )
    # Stitch the combined config together with special-token ids and types.
    _UpperCAmelCase = hf_wavavec.config.to_dict()
    _UpperCAmelCase = tokenizer.pad_token_id
    _UpperCAmelCase = tokenizer.bos_token_id
    _UpperCAmelCase = tokenizer.eos_token_id
    _UpperCAmelCase = """speech_to_text_2"""
    _UpperCAmelCase = """wav2vec2"""
    _UpperCAmelCase = SpeechEncoderDecoderConfig.from_dict(lowercase )
    hf_wavavec.save_pretrained(lowercase )
    feature_extractor.save_pretrained(lowercase )
if __name__ == "__main__":
    # CLI wrapper around the conversion routine above.
    UpperCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument(
        """--encoder_config_path""",
        default="""facebook/wav2vec2-large-lv60""",
        type=str,
        help="""Path to hf encoder wav2vec2 checkpoint config""",
    )
    parser.add_argument(
        """--decoder_config_path""",
        default="""facebook/s2t-small-mustc-en-fr-st""",
        type=str,
        help="""Path to hf decoder s2t checkpoint config""",
    )
    parser.add_argument("""--vocab_size""", default=1_0_2_2_4, type=int, help="""Vocab size of decoder""")
    parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    # NOTE(review): `parser` / `args` / `convert_wavaveca_checkpoint` are
    # never bound under these names in this mangled file — restore the
    # readable names from the upstream script before running.
    UpperCAmelCase__ = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 275
|
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
# Regexes used to convert between CamelCase and snake_case dataset names.
# Split "ABCWord" -> "ABC_Word" (uppercase run followed by a capitalized word).
UpperCAmelCase__ = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
# Split "wordWord" / "1Word" -> "word_Word" (lower/digit followed by upper).
UpperCAmelCase__ = re.compile(r"""([a-z\d])([A-Z])""")
# A single underscore (not part of a double underscore).
UpperCAmelCase__ = re.compile(r"""(?<!_)_(?!_)""")
# Runs of two or more underscores.
UpperCAmelCase__ = re.compile(r"""(_{2,})""")
# Valid split names: dotted identifiers, e.g. "train.clean".
UpperCAmelCase__ = r"""^\w+(\.\w+)*$"""
# Characters invalid in filenames on common filesystems.
UpperCAmelCase__ = r"""<>:/\|?*"""
def __UpperCAmelCase ( lowercase ):
    """Convert a CamelCase name to snake_case.

    The original mangled body applied the second regex to the untouched
    input (discarding the first substitution) and returned the unbound name
    `name`; restored so the substitutions chain and the result is lowered.
    """
    lowercase = _uppercase_uppercase_re.sub(R"""\1_\2""" ,lowercase )
    lowercase = _lowercase_uppercase_re.sub(R"""\1_\2""" ,lowercase )
    return lowercase.lower()
def __UpperCAmelCase ( lowercase ):
    """Convert a snake_case name to CamelCase.

    The original mangled body split the whole input (not each fragment) in
    the comprehension and iterated the unbound name `name`; restored: split
    on single underscores, then on multi-underscore runs, capitalize and
    join the non-empty fragments.
    """
    parts = _single_underscore_re.split(lowercase )
    parts = [_multiple_underscores_re.split(n ) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts ) if n != "" )
def __UpperCAmelCase ( lowercase ):
    """Return the snake_case filename prefix for a dataset name.

    The original compared `os.path.basename(...)` against the unbound name
    `name`; restored to compare against the argument itself, i.e. reject
    anything containing path separators.

    Raises:
        ValueError: if `lowercase` is a path rather than a bare name.
    """
    if os.path.basename(lowercase ) != lowercase:
        raise ValueError(f'''Should be a dataset name, not a path: {lowercase}''' )
    # NOTE(review): `camelcase_to_snakecase` is the readable name of the
    # helper above, which this mangled file defines as `__UpperCAmelCase`.
    return camelcase_to_snakecase(lowercase )
def __UpperCAmelCase ( name ,split ):
    """Return the filename prefix "<snake_case_name>-<split>".

    The original declared both parameters with one mangled identifier (a
    SyntaxError); restored as (name, split) per the upstream helper. Also
    fixes the doubled quote in the split error message.

    Raises:
        ValueError: if `name` is a path or `split` fails the `_split_re`
        pattern.
    """
    if os.path.basename(name ) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re ,split ):
        raise ValueError(f'''Split name should match '{_split_re}' but got '{split}'.''' )
    return f'''{filename_prefix_for_name(name )}-{split}'''
def __UpperCAmelCase ( path ,name ,split ,filetype_suffix=None ):
    """Return a glob pattern matching all shard files of a dataset split.

    The original declared all positional parameters with one mangled
    identifier (a SyntaxError); restored as (path, name, split,
    filetype_suffix) per the upstream helper.
    """
    prefix = filename_prefix_for_split(name ,split )
    if filetype_suffix:
        prefix += f'''.{filetype_suffix}'''
    filepath = os.path.join(path ,prefix )
    # Trailing '*' matches the per-shard suffix appended at write time.
    return f'''{filepath}*'''
def __UpperCAmelCase ( path ,name ,split ,filetype_suffix=None ,shard_lengths=None ):
    """Return the concrete shard filenames for a dataset split.

    The original declared all parameters with one mangled identifier (a
    SyntaxError) and referenced unbound locals; restored as (path, name,
    split, filetype_suffix, shard_lengths) per the upstream helper.

    Returns:
        A list of filenames — one "NNNNN-of-NNNNN" name per shard when
        `shard_lengths` is given, otherwise a single unsharded filename.
    """
    prefix = filename_prefix_for_split(name ,split )
    prefix = os.path.join(path ,prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'''.{filetype_suffix}'''
        return [filename]
| 275
| 1
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class snake_case_ ( unittest.TestCase ):
    """Unit tests for `is_safetensors_compatible`.

    Each method builds a list of repo filenames and asserts whether every
    PyTorch `.bin` weight has a `.safetensors` counterpart (optionally for a
    given variant such as "fp16"). The original mangled bodies passed the
    unbound name `__lowerCAmelCase` to every call (a NameError at runtime);
    locals are restored here with the filename lists kept byte-identical.

    NOTE(review): all methods share the mangled name `__A`, so only the last
    definition survives — restore the distinct upstream test names so every
    case actually runs.
    """

    def __A ( self ):
        # Full pipeline: every .bin has a .safetensors sibling -> compatible.
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def __A ( self ):
        # unet-only repo with both formats -> compatible.
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def __A ( self ):
        # unet safetensors missing -> NOT compatible.
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def __A ( self ):
        # text_encoder-only repo with both formats -> compatible.
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def __A ( self ):
        # text_encoder safetensors missing -> NOT compatible.
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def __A ( self ):
        # fp16 variant: every .fp16.bin has an .fp16.safetensors -> compatible.
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def __A ( self ):
        # fp16 variant, unet-only -> compatible.
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def __A ( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def __A ( self ):
        # fp16 variant with unet .fp16.safetensors missing -> NOT compatible.
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )

    def __A ( self ):
        # fp16 variant, text_encoder-only -> compatible.
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def __A ( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def __A ( self ):
        # fp16 variant with text_encoder safetensors missing -> NOT compatible.
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
| 345
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCAmelCase__: Dict = logging.get_logger(__name__)
# Map of pretrained EfficientFormer checkpoint ids to their hosted config files.
lowerCAmelCase__: Optional[Any] = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class snake_case_ ( lowerCAmelCase ):
    """Configuration for the EfficientFormer model.

    The original `__init__` declared all 27 parameters with one mangled
    identifier (a SyntaxError) and the body assigned them to `self` via
    unbound names; the signature and assignments are restored here following
    the upstream `EfficientFormerConfig` (names, order, and defaults).
    """

    # Identifier used by the auto-config machinery; the mangled attribute
    # name `__lowerCamelCase` would be name-mangled and invisible to the
    # `PretrainedConfig` base class, so the upstream name is restored.
    model_type = 'efficientformer'

    def __init__( self , depths = [3, 2, 6, 4] , hidden_sizes = [48, 96, 224, 448] , downsamples = [True, True, True, True] , dim = 448 , key_dim = 32 , attention_ratio = 4 , resolution = 7 , num_hidden_layers = 5 , num_attention_heads = 8 , mlp_expansion_ratio = 4 , hidden_dropout_prob = 0.0 , patch_size = 16 , num_channels = 3 , pool_size = 3 , downsample_patch_size = 3 , downsample_stride = 2 , downsample_pad = 1 , drop_path_rate = 0.0 , num_meta3d_blocks = 1 , distillation = True , use_layer_scale = True , layer_scale_init_value = 1e-5 , hidden_act = "gelu" , initializer_range = 0.02 , layer_norm_eps = 1e-12 , image_size = 224 , batch_norm_eps = 1e-05 , **kwargs , ):
        # List defaults mirror the upstream signature; they are only read,
        # never mutated, so the shared-mutable-default pitfall does not bite.
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        # Mangled source spelled this "num_metaad_blocks"; upstream attribute
        # is num_meta3d_blocks.
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 345
| 1
|
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

# Model-type -> config-class registry used to enumerate every config class.
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint check (composite configs
# with no single canonical checkpoint).
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """Verify every registered config class mentions a valid checkpoint link in its docstring.

    A checkpoint mention is valid when the markdown link target equals
    ``https://huggingface.co/<checkpoint name>``. Classes listed in
    CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK are exempt.

    Raises:
        ValueError: listing every config class without a valid checkpoint.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
# Run the docstring-checkpoint check when invoked as a script.
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 89
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Maps canonical REALM checkpoint names to their hosted config.json URLs.
# NOTE: fixed the "openqa" URL, which read ".../aresolve/main/..." instead of ".../resolve/main/...".
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    r"""
    Configuration for REALM-style models.

    Holds the BERT-style encoder hyper-parameters shared by all REALM variants,
    plus reader-specific and retrieval-specific settings. Token-id defaults and
    any extra keyword arguments are forwarded to ``PretrainedConfig``.
    """

    # Identifier used by the auto classes to route "realm" checkpoints here.
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 89
| 1
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    """Shuffle ``data`` in place by random pair swaps and return the same list.

    One swap of two uniformly chosen positions is performed per element.
    An empty list is returned unchanged (the loop body never runs).
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        # The original discarded this swap into throwaway names; swap the
        # two chosen slots for real.
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings. The original bound
    # these lists to throwaway names, so the prints raised NameError.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 257
|
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch

from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


# Maps a student/teacher type name to its (config class, MLM/LM model class,
# tokenizer class) triple. Read by main() when instantiating models.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args) -> None:
    """Validate parsed CLI arguments before training starts.

    The original signature took a mangled parameter while the body read
    ``args`` — a guaranteed NameError; the parameter is now named ``args``
    to match both the body and the call site.

    Raises:
        AssertionError: if any constraint on the argument combination fails.
    """
    # Exactly one of the MLM / CLM objectives must carry positive weight.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Student and teacher must be architecture-compatible.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    # All loss weights are non-negative and at least one is active.
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args) -> None:
    """Freeze the student's positional-embedding weights for supported architectures.

    The original had two identically-named parameters (a SyntaxError) and
    assigned ``False`` to throwaway locals. Restored to set
    ``requires_grad = False`` on the positional-embedding weight of the
    student model per architecture. No-op for other student types.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        # GPT-2 keeps learned position embeddings in `transformer.wpe`.
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args) -> None:
    """Freeze the student's token-type-embedding weights (RoBERTa students only).

    Same restoration as freeze_pos_embeddings: the original signature had
    duplicate parameter names and the assignment target was a throwaway local.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def UpperCamelCase ( ) -> int:
    """Distillation training entry point: parse CLI args, load data, build the
    teacher and student models, and run the Distiller training loop.

    NOTE(review): throughout this body the original assignment targets
    (parser, args, tokenizer, special_tok_ids, student, teacher, ...) have been
    mangled into the throwaway name `lowercase_`, and call arguments into `_a`,
    while later lines still read the intended names — as written this function
    raises NameError at the second statement. The comments below describe the
    evident intent; confirm against the upstream distillation script.
    """
    # Build the argument parser (intended binding: `parser`).
    lowercase_ :Optional[int] = argparse.ArgumentParser(description='''Training''' )
    parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
    parser.add_argument(
        '''--dump_path''' , type=_a , required=_a , help='''The output directory (log, checkpoints, parameters, etc.)''' )
    parser.add_argument(
        '''--data_file''' , type=_a , required=_a , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
    parser.add_argument(
        '''--student_type''' , type=_a , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=_a , help='''The student type (DistilBERT, RoBERTa).''' , )
    parser.add_argument('''--student_config''' , type=_a , required=_a , help='''Path to the student configuration.''' )
    parser.add_argument(
        '''--student_pretrained_weights''' , default=_a , type=_a , help='''Load student initialization checkpoint.''' )
    parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=_a , help='''Teacher type (BERT, RoBERTa).''' )
    parser.add_argument('''--teacher_name''' , type=_a , required=_a , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=_a , help='''Temperature for the softmax temperature.''' )
    parser.add_argument(
        '''--alpha_ce''' , default=0.5 , type=_a , help='''Linear weight for the distillation loss. Must be >=0.''' )
    parser.add_argument(
        '''--alpha_mlm''' , default=0.0 , type=_a , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
    parser.add_argument('''--alpha_clm''' , default=0.5 , type=_a , help='''Linear weight for the CLM loss. Must be >=0.''' )
    parser.add_argument('''--alpha_mse''' , default=0.0 , type=_a , help='''Linear weight of the MSE loss. Must be >=0.''' )
    parser.add_argument(
        '''--alpha_cos''' , default=0.0 , type=_a , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
    parser.add_argument(
        '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
    parser.add_argument(
        '''--mlm_mask_prop''' , default=0.15 , type=_a , help='''Proportion of tokens for which we need to make a prediction.''' , )
    parser.add_argument('''--word_mask''' , default=0.8 , type=_a , help='''Proportion of tokens to mask out.''' )
    parser.add_argument('''--word_keep''' , default=0.1 , type=_a , help='''Proportion of tokens to keep.''' )
    parser.add_argument('''--word_rand''' , default=0.1 , type=_a , help='''Proportion of tokens to randomly replace.''' )
    parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=_a , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
    parser.add_argument('''--token_counts''' , type=_a , help='''The token counts in the data_file for MLM.''' )
    parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
    parser.add_argument(
        '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
    parser.add_argument(
        '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
    parser.add_argument('''--n_epoch''' , type=_a , default=3 , help='''Number of pass on the whole dataset.''' )
    parser.add_argument('''--batch_size''' , type=_a , default=5 , help='''Batch size (for each process).''' )
    parser.add_argument(
        '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=_a , default=5_0 , help='''Gradient accumulation for larger training batches.''' , )
    parser.add_argument('''--warmup_prop''' , default=0.05 , type=_a , help='''Linear warmup proportion.''' )
    parser.add_argument('''--weight_decay''' , default=0.0 , type=_a , help='''Weight decay if we apply some.''' )
    parser.add_argument('''--learning_rate''' , default=5E-4 , type=_a , help='''The initial learning rate for Adam.''' )
    parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=_a , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' , default=5.0 , type=_a , help='''Max gradient norm.''' )
    parser.add_argument('''--initializer_range''' , default=0.02 , type=_a , help='''Random initialization range.''' )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=_a , default='''O1''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_gpu''' , type=_a , default=1 , help='''Number of GPUs in the node.''' )
    parser.add_argument('''--local_rank''' , type=_a , default=-1 , help='''Distributed training - Local rank''' )
    parser.add_argument('''--seed''' , type=_a , default=5_6 , help='''Random seed''' )
    parser.add_argument('''--log_interval''' , type=_a , default=5_0_0 , help='''Tensorboard logging interval.''' )
    parser.add_argument('''--checkpoint_interval''' , type=_a , default=4_0_0_0 , help='''Checkpoint interval.''' )
    # Parse and validate the arguments (intended binding: `args`).
    lowercase_ :Union[str, Any] = parser.parse_args()
    sanity_checks(_a )
    # ARGS #
    init_gpu_params(_a )
    set_seed(_a )
    # Master process prepares (or overwrites) the dump directory and records params.
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
                    ''' itUse `--force` if you want to overwrite it''' )
            else:
                shutil.rmtree(args.dump_path )
        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )
        # SAVE PARAMS #
        logger.info(f"Param: {args}" )
        with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
            json.dump(vars(_a ) , _a , indent=4 )
        git_log(args.dump_path )
    # Look up (config, model, tokenizer) classes for student and teacher types.
    lowercase_ , lowercase_ , lowercase_ :Dict = MODEL_CLASSES[args.student_type]
    lowercase_ , lowercase_ , lowercase_ :Dict = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    lowercase_ :Union[str, Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    lowercase_ :Optional[Any] = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        lowercase_ :List[str] = tokenizer.all_special_tokens.index(_a )
        lowercase_ :Union[str, Any] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}" )
    lowercase_ :Dict = special_tok_ids
    lowercase_ :List[str] = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}" )
    with open(args.data_file , '''rb''' ) as fp:
        lowercase_ :Tuple = pickle.load(_a )
    if args.mlm:
        # Smoothed token frequencies drive MLM masking probabilities.
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" )
        with open(args.token_counts , '''rb''' ) as fp:
            lowercase_ :List[Any] = pickle.load(_a )
        lowercase_ :Tuple = np.maximum(_a , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            lowercase_ :List[Any] = 0.0  # do not predict special tokens
        lowercase_ :Dict = torch.from_numpy(_a )
    else:
        lowercase_ :Tuple = None
    lowercase_ :List[Any] = LmSeqsDataset(params=_a , data=_a )
    logger.info('''Data loader created.''' )
    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}" )
    lowercase_ :Union[str, Any] = student_config_class.from_pretrained(args.student_config )
    lowercase_ :int = True
    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" )
        lowercase_ :List[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=_a )
    else:
        lowercase_ :Dict = student_model_class(_a )
    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}" )
    logger.info('''Student loaded.''' )
    # TEACHER #
    lowercase_ :int = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_a )
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}" )
    logger.info(f"Teacher loaded from {args.teacher_name}." )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(_a , _a )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(_a , _a )
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    lowercase_ :Tuple = Distiller(
        params=_a , dataset=_a , token_probs=_a , student=_a , teacher=_a )
    distiller.train()
    logger.info('''Let\'s go get some drinks.''' )
# NOTE(review): the training entry point above is bound to the mangled name
# `UpperCamelCase`, so this call to `main()` raises NameError as written.
if __name__ == "__main__":
    main()
| 257
| 1
|
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Integrate y' = f(x, y) with the classical 4th-order Runge-Kutta method.

    The original signature repeated one mangled parameter name five times
    (a SyntaxError) and collapsed the four stage slopes into one name; this
    restores f, initial value y0 at x0, step size h, and end point x_end.

    Returns:
        numpy array of n + 1 solution values, where n = ceil((x_end - x0) / h);
        index 0 holds y0.
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Four RK4 stage slopes.
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        # Weighted average advances the solution one step.
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 640
|
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


# Modalities a tool may declare for its inputs/outputs; read by the tester
# mixin below, which previously referenced this name while the list was
# bound to a mangled identifier.
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    """Build one dummy input per requested modality.

    "text" -> a fixed string, "image" -> a 512x512 fixture image,
    "audio" -> a ones tensor of length 3000; a nested list is handled
    recursively, producing a nested list of dummy inputs.

    Raises:
        ValueError: for any unrecognized input type.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            # Nested spec: recurse and keep the nesting in the result.
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs: List):
    """Map each tool output object to its modality name ("text"/"image"/"audio").

    Accepts plain values (str, PIL image, torch tensor) as well as their
    Agent* wrapper types.

    Raises:
        ValueError: for any object that matches no known modality.
    """
    detected = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            detected.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            detected.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            detected.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return detected
# NOTE(review): mixin of common tool tests, expected to be combined with a
# unittest.TestCase that provides `self.tool` and the assert* methods.
# All five test methods below were mangled to the same name `lowerCAmelCase__`,
# so only the last definition survives on the class, and the throwaway
# bindings (`lowercase_`, argument `a`) leave `inputs`/`outputs` and the
# isinstance targets unbound — NameError at runtime as written.
@is_tool_test
class _UpperCAmelCase :
    # Intent: every declared input/output modality is one of authorized_types.
    def lowerCAmelCase__ ( self : List[Any] ):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )
        lowercase_ : Optional[Any] = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , a ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        lowercase_ : Any = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    # Intent: calling the tool on dummy inputs yields the declared output types.
    def lowerCAmelCase__ ( self : Any ):
        '''simple docstring'''
        lowercase_ : List[str] = create_inputs(self.tool.inputs )
        lowercase_ : List[str] = self.tool(*a )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            lowercase_ : Union[str, Any] = [outputs]
        self.assertListEqual(output_types(a ) , self.tool.outputs )
    # Intent: the tool exposes a description and a default checkpoint.
    def lowerCAmelCase__ ( self : List[str] ):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
    # Intent: each output is an instance of the agent type mapped to its modality.
    def lowerCAmelCase__ ( self : Any ):
        '''simple docstring'''
        lowercase_ : Any = create_inputs(self.tool.inputs )
        lowercase_ : str = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
        for output, output_type in zip(a , self.tool.outputs ):
            lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(a , a ) )
    # Intent: the tool also accepts agent-typed (wrapped) inputs without raising.
    def lowerCAmelCase__ ( self : Tuple ):
        '''simple docstring'''
        lowercase_ : Dict = create_inputs(self.tool.inputs )
        lowercase_ : Optional[int] = []
        for _input, input_type in zip(a , self.tool.inputs ):
            if isinstance(a , a ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        lowercase_ : Any = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : Any = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

# The tokenizer class below reads these three registries by name; the
# original bound them to mangled identifiers, leaving the class attributes
# undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji JSON file into lookup dictionaries.

    Each vocab line is either a single token or a comma-separated group of
    tokens that share one id (a lone "," line stays a single token).

    The original signature repeated one mangled parameter name twice
    (a SyntaxError) and wrote the per-id entries into throwaway names.

    Returns:
        (vocab, raw_vocab, ids_to_tokens, emoji) where
        vocab maps each token -> id, raw_vocab maps the raw line -> id,
        ids_to_tokens maps id -> list of tokens, emoji is the parsed JSON.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # Split grouped lines on commas; keep single-token lines (and ",") whole.
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
# NOTE(review): a GPT-NeoX-Japanese tokenizer class. Its name and base class
# are mangled (`a ( a__ )`), the four class attributes are all bound to the
# same name `snake_case__` (only the last survives), and __init__ repeats the
# parameter `_snake_case` eight times — a SyntaxError as written. Comments
# below record the evident intent of each member.
class a ( a__ ):
    # Intent: vocab_files_names / pretrained_vocab_files_map /
    # max_model_input_sizes / model_input_names, in this order.
    snake_case__ = VOCAB_FILES_NAMES
    snake_case__ = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ = ['''input_ids''', '''attention_mask''']
    # Intent: (vocab_file, emoji_file, unk/pad/bos/eos tokens, do_clean_text, **kwargs).
    def __init__( self , _snake_case , _snake_case , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|startoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , **_snake_case , ):
        """simple docstring"""
        super().__init__(
            unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
        if not os.path.isfile(_snake_case ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(_snake_case ):
            raise ValueError(
                F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        # Intent: bind do_clean_text, unpack load_vocab_and_emoji() into
        # self.vocab / self.raw_vocab / self.ids_to_tokens / self.emoji,
        # then build the subword tokenizer; targets are mangled here.
        lowerCAmelCase = do_clean_text
        lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = load_vocab_and_emoji(_snake_case , _snake_case )
        lowerCAmelCase = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    # Vocabulary size = number of raw vocab entries.
    @property
    def UpperCamelCase__ ( self ):
        """simple docstring"""
        return len(self.raw_vocab )
    # Full vocab including tokens added after loading.
    def UpperCamelCase__ ( self ):
        """simple docstring"""
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    # Tokenize via the subword tokenizer, optionally cleaning the text first.
    def UpperCamelCase__ ( self , _snake_case ):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text )
    # Token -> id, falling back to the unknown-token id.
    def UpperCamelCase__ ( self , _snake_case ):
        """simple docstring"""
        return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
    # Id -> token string.
    def UpperCamelCase__ ( self , _snake_case ):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(_snake_case )
    # Join token pieces back into a single string.
    def UpperCamelCase__ ( self , _snake_case ):
        """simple docstring"""
        lowerCAmelCase = ''.join(_snake_case ).strip()
        return out_string
    # Build conversational input ids, truncating to model_max_length from the left.
    def UpperCamelCase__ ( self , _snake_case ):
        """simple docstring"""
        lowerCAmelCase = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] )
        if len(_snake_case ) > self.model_max_length:
            lowerCAmelCase = input_ids[-self.model_max_length :]
        return input_ids
    # Save vocab.txt and emoji.json into save_directory (or to a file prefix).
    # NOTE(review): `index` is read at the consecutiveness check but its
    # initialisation went to a throwaway name — NameError as written.
    def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
        """simple docstring"""
        lowerCAmelCase = 0
        if os.path.isdir(_snake_case ):
            lowerCAmelCase = os.path.join(
                _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            lowerCAmelCase = os.path.join(
                _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            lowerCAmelCase = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            lowerCAmelCase = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    lowerCAmelCase = token_index
                writer.write(','.join(_snake_case ) + '\n' )
                index += 1
        with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , _snake_case )
        return vocab_file, emoji_file
# NOTE(review): a SubWordJapaneseTokenizer-style helper class. Name and base
# are mangled (`a ( a__ )`), every assignment target in __init__ collapsed to
# `lowerCAmelCase` (so the seven regexes below would each overwrite the same
# binding instead of content_repatter1..6 / keisen / blocks / content_trans),
# and the three `UpperCamelCase__` methods shadow one another. Comments record
# the evident intent.
class a ( a__ ):
    # Intent: store vocab/ids_to_tokens/emoji, the max token length, the
    # URL/email/phone/date/era-date/price regexes, and a char-translation
    # table mapping box-drawing and block characters to <BLOCK>.
    def __init__( self , _snake_case , _snake_case , _snake_case ):
        """simple docstring"""
        lowerCAmelCase = vocab  # same as swe
        lowerCAmelCase = ids_to_tokens  # same as bpe
        lowerCAmelCase = emoji
        lowerCAmelCase = np.max([len(_snake_case ) for w in self.vocab.keys()] )
        lowerCAmelCase = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        lowerCAmelCase = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        lowerCAmelCase = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        lowerCAmelCase = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        lowerCAmelCase = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        lowerCAmelCase = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        lowerCAmelCase = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        lowerCAmelCase = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        lowerCAmelCase = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
    # Number of known ids.
    def __len__( self ):
        """simple docstring"""
        return len(self.ids_to_tokens )
    # Intent: replace URLs/emails/phone numbers/dates/prices with sentinel
    # tags and collapse repeated <BLOCK> markers.
    def UpperCamelCase__ ( self , _snake_case ):
        """simple docstring"""
        lowerCAmelCase = self.content_repattera.sub('<URL>' , _snake_case )
        lowerCAmelCase = self.content_repattera.sub('<EMAIL>' , _snake_case )
        lowerCAmelCase = self.content_repattera.sub('<TEL>' , _snake_case )
        lowerCAmelCase = self.content_repattera.sub('<DATE>' , _snake_case )
        lowerCAmelCase = self.content_repattera.sub('<DATE>' , _snake_case )
        lowerCAmelCase = self.content_repattera.sub('<PRICE>' , _snake_case )
        lowerCAmelCase = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            lowerCAmelCase = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
        return content
    # Intent: greedy longest-match tokenization with special handling for
    # whitespace/newline/emoji sentinels, symbol ranges, and byte fallback.
    def UpperCamelCase__ ( self , _snake_case , _snake_case=False ):
        """simple docstring"""
        lowerCAmelCase = text.replace(' ' , '<SP>' )
        lowerCAmelCase = text.replace('　' , '<SP>' )
        lowerCAmelCase = text.replace('\r\n' , '<BR>' )
        lowerCAmelCase = text.replace('\n' , '<BR>' )
        lowerCAmelCase = text.replace('\r' , '<BR>' )
        lowerCAmelCase = text.replace('\t' , '<TAB>' )
        lowerCAmelCase = text.replace('—' , 'ー' )
        lowerCAmelCase = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                lowerCAmelCase = text.replace(_snake_case , _snake_case )
        if clean:
            lowerCAmelCase = self.clean_text(_snake_case )
        # True when x is a single char whose UTF-8 encoding falls in the
        # listed two-byte symbol ranges.
        def check_simbol(_snake_case ):
            lowerCAmelCase = x.encode()
            if len(_snake_case ) == 1 and len(_snake_case ) == 2:
                lowerCAmelCase = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xc_2a1 and c <= 0xc_2bf)
                    or (c >= 0xc_780 and c <= 0xc_783)
                    or (c >= 0xc_ab9 and c <= 0xc_bbf)
                    or (c >= 0xc_c80 and c <= 0xc_da2)
                ):
                    return True
            return False
        # True when x is a single char in the U+2000-U+2BFF three-byte range.
        def checkuae(_snake_case ):
            lowerCAmelCase = x.encode()
            if len(_snake_case ) == 1 and len(_snake_case ) == 3:
                lowerCAmelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xe28_080 and c <= 0xe2b_07f:
                    return True
            return False
        lowerCAmelCase = 0
        lowerCAmelCase = []
        while pos < len(_snake_case ):
            # Candidate window: longest-match scan, widened when at a '<' sentinel.
            lowerCAmelCase = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            lowerCAmelCase = []  # (token_id, token, pos)
            for e in range(_snake_case , _snake_case , -1 ):
                lowerCAmelCase = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(_snake_case ) > 2:
                        lowerCAmelCase = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(_snake_case ) > 0:
                # the smallest token_id is adopted
                lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = sorted(_snake_case , key=lambda _snake_case : x[0] )[0]
                result.append(_snake_case )
                lowerCAmelCase = e
            else:
                lowerCAmelCase = pos + 1
                lowerCAmelCase = text[pos:end]
                if check_simbol(_snake_case ):
                    result.append('<KIGOU>' )
                elif checkuae(_snake_case ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                lowerCAmelCase = end
        return result
    # Intent: id -> surface text, reassembling byte-fallback runs and
    # expanding emoji/whitespace sentinels.
    def UpperCamelCase__ ( self , _snake_case , _snake_case="\n" ):
        """simple docstring"""
        lowerCAmelCase = []
        lowerCAmelCase = []
        lowerCAmelCase = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(_snake_case ) > 0:
                words.append(bytearray(_snake_case ).decode('utf-8' , errors='replace' ) )
                lowerCAmelCase = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(_snake_case )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(_snake_case )
        if len(_snake_case ) > 0:
            words.append(bytearray(_snake_case ).decode('utf-8' , errors='replace' ) )
        lowerCAmelCase = ''.join(_snake_case )
        return text
| 4
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Submodule name -> public names it exports; consumed lazily by _LazyModule so
# heavy torch imports only happen on first attribute access.
# Bug fixed: the dict was bound to a throwaway name while `_import_structure`
# (used below) was never defined, and the lazy module was never installed.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: expose the modeling objects as well.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Submodule name -> public names it exports; consumed lazily by _LazyModule.
# Bug fixed: the dict was bound to `_lowercase` while `_import_structure`
# (used below) was never defined, and the lazy module was never installed.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: expose the modeling objects as well.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 546
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single HackerNews item (title, url, score, ...) by its id.

    Bug fixed: the URL f-string referenced ``story_id`` while the parameter
    had a different (obfuscated) name, raising NameError.
    """
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the current top *max_stories* posts from HackerNews."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 546
| 1
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a : List[str] = 1_6
_a : int = 3_2
def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the ``Accelerator``; used for main-process-first dataset
            mapping and to pick TPU/mixed-precision-friendly padding.
        batch_size (int): per-device batch size for both splits.

    Returns:
        tuple: ``(train_dataloader, eval_dataloader)``.

    Bug fixed: both parameters were named ``lowercase`` (a SyntaxError) and the
    body referenced undefined names (``accelerator``, ``datasets``, ...).
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a : Any = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """Train bert-base-cased on GLUE MRPC with an OOM-safe inner training loop.

    Args:
        config (dict): hyper-parameters (``lr``, ``num_epochs``, ``seed``,
            ``batch_size``).
        args: parsed CLI arguments (``cpu``, ``mixed_precision``).

    Bug fixed: both parameters were named ``lowercase`` (a SyntaxError) and the
    body referenced undefined names throughout.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """Parse CLI arguments and launch training.

    Bug fixed: the guard below calls ``main()`` but the function was defined
    under an obfuscated name; argparse keywords used undefined names
    (``type=lowercase, default=lowercase``) instead of ``str``/``None``.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 689
|
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( ProcessorMixin ):
    r"""Processor for SAM: wraps a ``SamImageProcessor`` and additionally
    rescales prompt points, labels and boxes to the processed image
    coordinate space.

    Bugs fixed: the base class was an undefined name; the two class attributes
    were both named ``a`` (second clobbered the first); five private methods
    were all named ``lowerCamelCase__`` so ``__call__``'s helpers were
    undefined; ``np.floataa`` was a typo for ``np.float32``.
    """

    # ProcessorMixin wiring: single wrapped component and its class name.
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        # Sentinel value marking padded prompt points/labels.
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors=None, **kwargs):
        """Process images and optional point/label/box prompts into model inputs."""
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        # pop arguments that are not used in the foward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor

    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt"):
        """Rescale prompts to the processed image size and convert to framework tensors."""
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # Single original size shared by all prompts.
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad ragged per-image point arrays to a common length with the pad value."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """Scale (x, y) coordinates from the original image size to target_size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            # Boxes are (x1, y1, x2, y2): treat as two corner points.
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords

    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        """Validate prompt inputs and convert them to lists of numpy arrays."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        """Model input names, deduplicated while preserving order."""
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        """Delegate mask post-processing to the wrapped image processor."""
        return self.image_processor.post_process_masks(*args, **kwargs)
| 689
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase__ ( PipelineTool ):
    """Tool that answers an English question about an image using a ViLT VQA checkpoint.

    Bugs fixed: the base class was an undefined name (should be the imported
    ``PipelineTool``); all class attributes were named ``a`` (clobbering each
    other); the three pipeline hooks were all named ``UpperCAmelCase_`` instead
    of ``encode``/``forward``/``decode``; ``config.idalabel`` was a typo for
    ``config.id2label``.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL is required for image handling; fail early if vision extras are missing.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Tokenize the (image, question) pair into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model; returns classification logits over candidate answers."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit to its answer string."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 39
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Submodule name -> public names it exports; consumed lazily by _LazyModule.
# Bug fixed: the dict and the per-backend lists were bound to a repeatedly
# reassigned throwaway name while `_import_structure` (used below) was never
# defined, and the lazy module was never installed into sys.modules.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39
| 1
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase = logging.get_logger(__name__)
class lowercase__ ( BaseImageProcessor ):
    r"""Image processor for Swin2SR-style super-resolution models.

    Optionally rescales pixel values and pads height/width up to a multiple of
    ``pad_size`` with symmetric padding.

    Bugs fixed: the base class was an undefined name (should be the imported
    ``BaseImageProcessor``); ``model_input_names`` was bound to a throwaway
    name; the three methods were all named ``UpperCamelCase_`` so
    ``self.rescale``/``self.pad`` inside ``preprocess`` resolved incorrectly.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255); delegates to the module-level helper."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        """Pad bottom/right so both spatial dimensions become multiples of *size*."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images, do_rescale=None, rescale_factor=None, do_pad=None, pad_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        """Prepare a batch: to numpy -> optional rescale -> optional pad -> channel format."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 88
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND gate: returns 1 only when both inputs are 1 (non-zero).

    Bug fixed: both functions were defined under the same obfuscated name
    (the second clobbered the first) while the call sites used
    ``and_gate``/``test_and_gate``, which were undefined.
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 110
| 0
|
# Cell injected at the top of generated notebooks: installs the library.
# Bug fixed: all three values were bound to the same throwaway name, so the
# reference to INSTALL_CONTENT below raised NameError at import time.
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# First cells of every generated notebook.
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]

# Placeholder patterns that doc-example formatting must leave untouched.
black_avoid_patterns = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 721
|
from sklearn.metrics import mean_squared_error
import datasets
__lowerCamelCase : List[str] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
__lowerCamelCase : Optional[int] = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
__lowerCamelCase : str = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase ( datasets.Metric ):
    """Mean squared error metric backed by ``sklearn.metrics.mean_squared_error``.

    Bug fixed: all three methods were named ``UpperCAmelCase__`` (each def
    clobbered the previous one) and ``self._get_feature_types`` was undefined;
    ``datasets.Metric`` expects the hooks ``_info``/``_compute``.
    """

    def _info(self):
        # Metric metadata: description, citation and the expected feature schema.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
            ],
        )

    def _get_feature_types(self):
        # The "multilist" config accepts multi-dimensional inputs (sequences of floats).
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('''float''')),
                "references": datasets.Sequence(datasets.Value('''float''')),
            }
        else:
            return {
                "predictions": datasets.Value('''float'''),
                "references": datasets.Value('''float'''),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Compute MSE (or RMSE when ``squared=False``) of predictions vs references."""
        mse = mean_squared_error(
            predictions, references, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 501
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
    """Unit tests for the character-level MgpstrTokenizer (vocab of [GO], [s],
    digits and lowercase letters).

    NOTE(review): every method below carries the same obfuscated name
    ``__snake_case``, so in this form only the last definition survives on the
    class; the original distinct test/helper names appear to have been lost —
    verify against the upstream test file before relying on these.
    """
    # Tokenizer class under test.
    _lowerCamelCase =MgpstrTokenizer
    # No fast (Rust) tokenizer variant exists for MGP-STR.
    _lowerCamelCase =False
    # Extra kwargs forwarded by the common tests.
    _lowerCamelCase ={}
    _lowerCamelCase =False
    def __snake_case ( self : str ):
        # Build a minimal vocab file ([GO], [s], 0-9, a-z) in the temp dir.
        super().setUp()
        # fmt: off
        UpperCAmelCase = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        # token -> id map written to VOCAB_FILES_NAMES['vocab_file'].
        # NOTE(review): `a__` looks like a lost reference to the vocab list above — verify.
        UpperCAmelCase = dict(zip(a__ , range(len(a__ ) ) ) )
        UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(a__ ) + '''\n''' )
    def __snake_case ( self : Optional[Any] , **a__ : Dict ):
        # Factory: load a tokenizer from the temp vocab written in setUp.
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a__ )
    def __snake_case ( self : List[str] , a__ : Dict ):
        # Provide a (input_text, output_text) pair for round-trip tests.
        UpperCAmelCase = '''tester'''
        UpperCAmelCase = '''tester'''
        return input_text, output_text
    @unittest.skip('''MGP-STR always lower cases letters.''' )
    def __snake_case ( self : Any ):
        pass
    def __snake_case ( self : Optional[Any] ):
        # Adding a special token must encode to a single id and be skipped on decode.
        UpperCAmelCase = self.get_tokenizers(do_lower_case=a__ )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase = '''[SPECIAL_TOKEN]'''
                tokenizer.add_special_tokens({'''cls_token''': special_token} )
                UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=a__ )
                self.assertEqual(len(a__ ) , 1 )
                UpperCAmelCase = tokenizer.decode(a__ , skip_special_tokens=a__ )
                self.assertTrue(special_token not in decoded )
    def __snake_case ( self : Optional[Any] ):
        # tokenize -> ids -> tokens -> decode must round-trip (spaces removed).
        UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase, UpperCAmelCase = self.get_input_output_texts(a__ )
                UpperCAmelCase = tokenizer.tokenize(a__ )
                UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
                UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
                self.assertListEqual(a__ , a__ )
                UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
                self.assertNotEqual(len(a__ ) , 0 )
                UpperCAmelCase = tokenizer.decode(a__ )
                self.assertIsInstance(a__ , a__ )
                self.assertEqual(text_a.replace(''' ''' , '''''' ) , a__ )
    @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
    def __snake_case ( self : Any ):
        pass
    @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
    def __snake_case ( self : Tuple ):
        pass
| 51
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger for the conversion script.
UpperCamelCase_ = logging.get_logger(__name__)
# Maps fairseq parameter-name fragments to their HuggingFace counterparts;
# "*" is substituted with the encoder layer index during weight loading.
UpperCamelCase_ = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
    """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
    """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
    """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
    """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
    """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
    """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
    """self_attn.rotary_emb""": """encoder.embed_positions""",
    """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
    """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
    """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
    """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
    """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
    """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
    """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
    """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
    """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
    """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
    """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
    """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
# Keys that live at the top level of the HF model and therefore do not get the
# "wav2vec2_conformer." prefix when mapped.
UpperCamelCase_ = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``hf_pointer`` along the dotted ``key`` and copy ``value`` into the
    tensor selected by ``weight_type`` (or into the module tensor itself).

    Fix: the original had five identically-named parameters (a SyntaxError) and
    every branch assigned the checkpoint tensor to a throwaway local instead of
    writing it into the HF module. The name is restored to ``set_recursively``,
    which is what the loading loop calls.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path, e.g. ``"encoder.layers.3.self_attn.linear_q"``.
        value: source tensor from the fairseq checkpoint.
        full_name: original fairseq parameter name (used in messages only).
        weight_type: one of "weight", "weight_g", "weight_v", "bias",
            "running_mean", "running_var", "num_batches_tracked", "inv_freq",
            or None for a bare tensor.

    Raises:
        ValueError: if the checkpoint tensor's shape does not match the target.
    """
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)
    # Shape-check against the exact sub-tensor about to be overwritten.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}'''
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor in the fairseq model's state_dict into ``hf_model``.

    Conv feature-extractor tensors go through ``load_conv_layer``; everything
    else is routed through the MAPPING table and ``set_recursively``. Any
    tensor that matches nothing is collected and logged as unused.

    Fix: the original had lost all local-variable and call-target names to
    obfuscation; they are restored here. ``is_headless`` is accepted for
    signature compatibility but unused, as in the original call site.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # NOTE: attribute spelling follows this file's WavaVeca naming convention.
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == '''group''',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    # Decide which sub-tensor of the target module to fill.
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "inv_freq" in name:
                        weight_type = '''inv_freq'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF extractor.

    fairseq names look like ``...conv_layers.<layer_id>.<type_id>...`` where
    type_id 0 is the conv itself and type_id 2 is the layer norm (present on
    every layer without group norm, or only on layer 0 with group norm).

    Fix: the original had five identically-named parameters (a SyntaxError) and
    assigned the tensors to throwaway locals instead of the extractor's
    conv/layer-norm data; the name is restored to ``load_conv_layer``, which is
    what ``recursively_load_weights`` calls.

    Raises:
        ValueError: on a shape mismatch with the target tensor.
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq wav2vec2-conformer checkpoint to the HF format.

    Builds the config (optionally from ``config_path``), reconstructs the CTC
    vocabulary/tokenizer/processor when the model is fine-tuned, loads the
    fairseq model, copies its weights and saves everything under
    ``pytorch_dump_folder_path``.

    Fix: the original had five identically-named parameters (a SyntaxError) and
    had lost the assignment targets for config fields, vocab entries and model
    objects; the canonical names are restored, including the function name the
    ``__main__`` block calls.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act='''swish''')
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # Checkpoints trained with rotary embeddings encode it in their path.
        config.position_embeddings_type = '''rotary'''
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, '''w''', encoding='''utf-8''') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='''|''',
                do_lower_case=False,
            )
            # Layer-norm feature extractors were trained with attention masks.
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 92
| 0
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build a DPTImageProcessor in the tests.

    Fix: the original ``__init__`` declared every parameter under one
    obfuscated name (a SyntaxError), and the class/method names did not match
    the references ``DPTImageProcessingTester`` / ``prepare_image_processor_dict``
    used by the test class below; both are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Default target size used by the resize step.
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class UpperCamelCase_(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests DPTImageProcessor: attribute presence, from_dict round-trips, and
    __call__ on PIL, numpy and torch inputs.

    Fix: the original inherited from an undefined name, bound the processor
    class to ``A`` while the bodies read ``self.image_processing_class``, and
    gave every method the same name so only the last one survived; the
    canonical test/method names are restored.
    """

    # Processor class exercised by the mixin and the tests below.
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 18, """width""": 18})
        # Passing a bare int as `size` must expand to a square size dict.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )
| 571
|
from ...processing_utils import ProcessorMixin
class UpperCamelCase_(ProcessorMixin):
    """Bundles a TVLT image processor and feature extractor behind one __call__.

    Fix: the original inherited from an undefined name, lost the
    ``attributes``/``*_class`` bindings ProcessorMixin requires, and declared
    every ``__call__`` parameter under one obfuscated name (a SyntaxError) while
    the body read ``images``/``audio``/... ; the canonical names are restored.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = '''TvltImageProcessor'''
    feature_extractor_class = '''TvltFeatureExtractor'''

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Preprocess any combination of images, mixed images and audio, merging
        the individual feature dicts into a single output dict.

        Raises:
            ValueError: if neither images nor audio is given.
        """
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        # Deduplicate while preserving order across both sub-processors.
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 571
| 1
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Prepare a PIL image for the super-resolution pipeline.

    Resizes both sides down to a multiple of 32, converts to a float32 NCHW
    torch tensor and rescales pixel values from [0, 255] to [-1, 1].

    Fix: the body read a parameter named ``image`` that the obfuscated
    signature did not declare, and the function name is restored to
    ``preprocess``, which is what the pipeline below calls.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class snake_case(DiffusionPipeline):
    r"""Latent-diffusion pipeline for image super-resolution.

    Fix: the original inherited from an undefined name and declared every
    ``__call__`` parameter under one obfuscated name (a SyntaxError) while the
    body read ``image``/``batch_size``/``eta``/...; the canonical parameter and
    local names are restored, including storing ``eta`` into the scheduler
    kwargs dict instead of a throwaway local.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the denoising loop on a low-resolution input image and return the
        upscaled result (``ImagePipelineOutput`` or a plain tuple)."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}''')
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 399
|
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute ``(a ** n) % mod`` in O(log n) multiplications by squaring.

    Fixes: the recursive calls and the demo script reference
    ``binary_exponentiation`` while the def carried an obfuscated name, and the
    even branch used true division ``n / 2`` (producing floats); both restored
    to integer recursion.

    Args:
        a: base.
        n: non-negative integer exponent.
        mod: positive modulus.

    Returns:
        ``(a ** n) % mod``.
    """
    if n == 0:
        return 1
    elif n % 2 == 1:
        # Odd exponent: peel off one factor of `a`.
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # Even exponent: square the half power; // keeps n an int.
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# Demo: modular division via Fermat's little theorem — a/b mod p equals
# a * b^(p-2) mod p. (Fix: the three constants were all bound to one
# obfuscated name while the prints read p, a and b.)
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 399
| 1
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 716
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__(func):
    """Decorator that makes the wrapped function return its own run time.

    The wrapped function's return value is discarded; the wrapper returns the
    elapsed wall-clock seconds instead (used for benchmarking below).

    Fix: the original assigned ``func.__name__`` to a throwaway local; it is
    now copied onto the wrapper so benchmark reports show the real name.
    """
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    # Preserve the wrapped function's name for reporting.
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Create ``num_examples`` random rows matching a ``datasets`` feature schema.

    Supported feature kinds: ``_ArrayXD`` (random array of the declared shape),
    ``datasets.Value`` (fixed sentence for strings, random small int otherwise)
    and nested ``datasets.Sequence`` (shape taken from ``seq_shapes[column]``).

    Fix: the original declared all three parameters under one obfuscated name
    (a SyntaxError) while the body read ``features``; the name is restored to
    ``generate_examples``, which the writer helper below calls.

    Returns:
        List of ``(row_index, example_dict)`` tuples.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested sequences down to the leaf value type.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data
def a__(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write ``num_examples`` random rows to an Arrow file and reload it.

    Fix: the original declared all four parameters under one obfuscated name
    (a SyntaxError); the canonical local names are restored.

    Args:
        dataset_path: output path for the Arrow file.
        features: ``datasets.Features`` schema used both to generate and encode rows.
        num_examples: number of rows to generate.
        seq_shapes: per-column shapes for Sequence features.

    Returns:
        The dataset reloaded from the written file.

    Raises:
        ValueError: if the writer reports a different row count than requested.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
    num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 660
| 0
|
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> float:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__snake_case ) * abs(__snake_case )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    # (Fix: the body of the guard was not indented, a SyntaxError.)
    import doctest

    doctest.testmod(verbose=True)
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazily expose the Autoformer public API: the configuration is always
# importable, the modeling objects only when torch is present.
# (Fix: `_import_structure` was never defined — the _LazyModule call at the
# bottom raised NameError — the modeling list clobbered the dict instead of
# adding a key, and the lazy module was never installed into sys.modules.)
_import_structure = {
    '''configuration_autoformer''': [
        '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''AutoformerConfig''',
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_autoformer'''] = [
        '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''AutoformerForPrediction''',
        '''AutoformerModel''',
        '''AutoformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1
| 0
|
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the ESM configuration module.
UpperCamelCase_ : int = logging.get_logger(__name__)
# TODO Update this
# Map of pretrained checkpoint names to their hosted config.json URLs.
UpperCamelCase_ : Optional[int] = {
    """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowercase(PretrainedConfig):
    """Configuration for ESM / ESMFold models.

    NOTE(review): this block was name-mangled — every ``__init__`` parameter
    was named ``a`` (duplicate parameter names are a SyntaxError) and every
    attribute assignment targeted a throwaway local instead of ``self``.
    Names are restored from the assignments' right-hand sides and the
    undefined base ``lowerCAmelCase`` is replaced by the imported
    ``PretrainedConfig``. ``EsmFoldConfig`` and ``get_default_vocab_list``
    are referenced by the original text but their definitions in this file
    appear renamed — reconcile before running.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                # Accept a plain dict and promote it to the dataclass.
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            # Plain (non-folding) ESM: no fold config / vocab list.
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested fold config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class _lowercase:
    """ESMFold sub-configuration.

    NOTE(review): all fields were mangled to the single name ``_a`` (later
    annotations silently overwrite earlier ones in a dataclass), and both
    methods shared one mangled name. ``trunk`` / ``use_esm_attn_map`` are
    grounded by usages in this file; the remaining field names follow the
    transformers ESMFold reference — confirm against upstream.
    ``TrunkConfig`` is referenced by the original text but its definition in
    this file appears renamed.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Default / promote the nested trunk configuration.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class _lowercase:
    """ESMFold trunk sub-configuration with consistency validation.

    NOTE(review): all fields were mangled to ``_a``; the names used by the
    validation below (``max_recycles``, ``sequence_state_dim``, ...) ground
    most of them, the rest follow the transformers ESMFold reference —
    confirm against upstream. ``StructureModuleConfig`` is referenced by the
    original text but its definition in this file appears renamed.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Default / promote the nested structure-module configuration.
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''')
        # Fixed: the original compared `x % x`, which is always 0 and never
        # fired; the intended check is divisibility by the head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''')
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''')
        if self.dropout >= 0.4:
            raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''')

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure module."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class _lowercase:
    """ESMFold structure-module sub-configuration.

    NOTE(review): all fields were mangled to ``_a``; names below follow the
    transformers ESMFold reference (values match the original defaults in
    order) — confirm against upstream. ``to_dict`` is grounded by the
    ``self.structure_module.to_dict()`` call in the trunk config.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize all fields to a plain dict."""
        return asdict(self)
def __lowercase() -> tuple:
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    Fixed: the return annotation previously claimed ``int`` although the body
    returns a tuple of strings. Special tokens come first, then amino-acid
    letters and gap/marker symbols, with ``<mask>`` last.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 497
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all five module constants were bound to the same mangled name,
# clobbering each other, although the tokenizer class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and `logger`. Names restored;
# SPIECE_UNDERLINE follows the transformers convention — confirm upstream.
logger = logging.get_logger(__name__)

# File name expected for the SentencePiece model.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = "▁"
class _lowercase(PreTrainedTokenizer):
    """BARThez tokenizer backed by a SentencePiece BPE model.

    NOTE(review): this block was name-mangled — the undefined base
    ``lowerCAmelCase`` can only be the imported ``PreTrainedTokenizer``, every
    method shared the name ``_UpperCamelCase`` (later defs shadowed earlier
    ones), and all parameters were named ``a`` (duplicate parameter names are
    a SyntaxError). Names are restored from the tokenizer API the bodies
    implement and from the identifiers the bodies read.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word: it absorbs the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Fairseq reserves the first ids for special tokens; <mask> gets the
        # last SentencePiece id (NOTE(review): restored per upstream — confirm).
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a 0/1 mask marking special-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """BARThez does not use token types: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id map including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # SentencePiece returns 0 for unknown pieces.
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens back into a string, decoding around special tokens."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it from the state.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 497
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Tuple = tempfile.mkdtemp()
A : Optional[int] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
A : List[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
A : str = os.path.join(self.tmpdirname, _lowercase )
with open(self.image_processor_file, """w""", encoding="""utf-8""" ) as fp:
json.dump(_lowercase, _lowercase )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return BertTokenizer.from_pretrained(self.tmpdirname, **_lowercase )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return BertTokenizerFast.from_pretrained(self.tmpdirname, **_lowercase )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **_lowercase )
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Optional[Any] = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : List[str] = [Image.fromarray(np.moveaxis(_lowercase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.get_tokenizer()
A : int = self.get_rust_tokenizer()
A : Dict = self.get_image_processor()
A : int = AlignProcessor(tokenizer=_lowercase, image_processor=_lowercase )
processor_slow.save_pretrained(self.tmpdirname )
A : Any = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=_lowercase )
A : str = AlignProcessor(tokenizer=_lowercase, image_processor=_lowercase )
processor_fast.save_pretrained(self.tmpdirname )
A : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, _lowercase )
self.assertIsInstance(processor_fast.tokenizer, _lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, _lowercase )
self.assertIsInstance(processor_fast.image_processor, _lowercase )
def _lowerCAmelCase ( self ):
A : List[str] = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : List[str] = self.get_image_processor(do_normalize=_lowercase, padding_value=1.0 )
A : Any = AlignProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=_lowercase, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, _lowercase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, _lowercase )
def _lowerCAmelCase ( self ):
A : Any = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Optional[Any] = AlignProcessor(tokenizer=_lowercase, image_processor=_lowercase )
A : Tuple = self.prepare_image_inputs()
A : Tuple = image_processor(_lowercase, return_tensors="""np""" )
A : Tuple = processor(images=_lowercase, return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : Dict = self.get_image_processor()
A : List[Any] = self.get_tokenizer()
A : Union[str, Any] = AlignProcessor(tokenizer=_lowercase, image_processor=_lowercase )
A : str = """lower newer"""
A : List[Any] = processor(text=_lowercase )
A : Union[str, Any] = tokenizer(_lowercase, padding="""max_length""", max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : Dict = self.get_image_processor()
A : Union[str, Any] = self.get_tokenizer()
A : int = AlignProcessor(tokenizer=_lowercase, image_processor=_lowercase )
A : int = """lower newer"""
A : int = self.prepare_image_inputs()
A : int = processor(text=_lowercase, images=_lowercase )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.get_image_processor()
A : Optional[int] = self.get_tokenizer()
A : List[Any] = AlignProcessor(tokenizer=_lowercase, image_processor=_lowercase )
A : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Dict = processor.batch_decode(_lowercase )
A : List[Any] = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase, _lowercase )
def _lowerCAmelCase ( self ):
A : str = self.get_image_processor()
A : Optional[int] = self.get_tokenizer()
A : List[Any] = AlignProcessor(tokenizer=_lowercase, image_processor=_lowercase )
A : Optional[Any] = """lower newer"""
A : int = self.prepare_image_inputs()
A : List[Any] = processor(text=_lowercase, images=_lowercase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 662
|
"""simple docstring"""
from __future__ import annotations
def __snake_case(UpperCamelCase__) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of the input list.

    NOTE(review): every local in the original was mangled to ``A`` while the
    body read ``pivot``/``is_found``/``i``/``temp_array``/``longest_subseq``;
    locals restored from those reads.

    >>> __snake_case([1, 2, 3])
    [1, 2, 3]
    """
    array = UpperCamelCase__
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            # Branch that drops the pivot: recurse on what can follow array[i].
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = __snake_case(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    # Branch that keeps the pivot.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *__snake_case(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 690
| 0
|
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
a__ : Optional[Any] = 'scheduler_config.json'
class UpperCamelCase_ ( __lowercase):
    """Enumeration of flax Karras-style diffusion scheduler types (values 1-5).

    NOTE(review): member names were all mangled to ``snake_case__`` so only the
    last binding (= 5) survives at class creation; the original member names
    must be restored from the diffusers source before this enum is usable.
    The base ``__lowercase`` is also undefined in this file — confirm.
    """
    snake_case__ : List[Any] = 1
    snake_case__ : Tuple = 2
    snake_case__ : Dict = 3
    snake_case__ : Tuple = 4
    snake_case__ : Tuple = 5
@dataclass
class UpperCamelCase_ ( __lowercase):
    """Output dataclass for flax schedulers, wrapping a single jnp.ndarray.

    NOTE(review): the field name was mangled to ``snake_case__``; upstream this
    is presumably ``prev_sample`` — confirm against the diffusers source.
    """
    snake_case__ : jnp.ndarray
class UpperCamelCase_ :
    """Base mixin for flax schedulers: config loading/saving plus the
    compatible-schedulers registry.

    NOTE(review): this block is heavily name-mangled — the three methods and
    the property all share the name ``UpperCAmelCase_`` (only the last binding
    survives), several signatures declare duplicate parameter names (a
    SyntaxError), and the bodies reference an undefined ``__a``. Upstream
    these are ``from_pretrained`` / ``save_pretrained`` / ``compatibles`` /
    ``_get_compatibles``; restore from the diffusers source before use.
    Code left byte-identical pending that reconciliation.
    """
    # Class-level settings (names mangled; upstream: config_name,
    # ignore_for_config, _compatibles, has_compatibles — confirm).
    snake_case__ : Tuple = SCHEDULER_CONFIG_NAME
    snake_case__ : Union[str, Any] = ['''dtype''']
    snake_case__ : Union[str, Any] = []
    snake_case__ : Any = True
    @classmethod
    def UpperCAmelCase_ ( cls : Union[str, Any] , UpperCAmelCase__ : Dict[str, Any] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Dict=False , **UpperCAmelCase__ : Any , ) -> Optional[int]:
        # Loads a config (optionally from a subfolder), builds the scheduler
        # from it, and creates the initial state for stateful schedulers.
        __SCREAMING_SNAKE_CASE = cls.load_config(
            pretrained_model_name_or_path=__a , subfolder=__a , return_unused_kwargs=__a , **__a , )
        __SCREAMING_SNAKE_CASE = cls.from_config(__a , return_unused_kwargs=__a , **__a )
        if hasattr(__a , "create_state" ) and getattr(__a , "has_state" , __a ):
            __SCREAMING_SNAKE_CASE = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, os.PathLike] , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : int ) -> Optional[Any]:
        # Delegates to ConfigMixin.save_config (optionally pushing to the hub).
        self.save_config(save_directory=__a , push_to_hub=__a , **__a )
    @property
    def UpperCAmelCase_ ( self : str ) -> int:
        # Property listing scheduler classes compatible with this one.
        return self._get_compatibles()
    @classmethod
    def UpperCAmelCase_ ( cls : int ) -> Union[str, Any]:
        # Resolves compatible class names against the top-level package module.
        __SCREAMING_SNAKE_CASE = list(set([cls.__name__] + cls._compatibles ) )
        __SCREAMING_SNAKE_CASE = importlib.import_module(__name__.split("." )[0] )
        __SCREAMING_SNAKE_CASE = [
            getattr(__a , __a ) for c in compatible_classes_str if hasattr(__a , __a )
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    """Right-pad ``x`` with singleton axes and broadcast it to ``shape``.

    Name restored from the call site in ``get_sqrt_alpha_prod`` below; the
    original declared duplicate parameter names (a SyntaxError) while the
    body read ``x``.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_=0.999 , lowerCAmelCase_=jnp.floataa ):
'''simple docstring'''
def alpha_bar(lowerCAmelCase_ ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
__SCREAMING_SNAKE_CASE = []
for i in range(UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(UpperCamelCase__ ) / alpha_bar(UpperCamelCase__ ) , UpperCamelCase__ ) )
return jnp.array(UpperCamelCase__ , dtype=UpperCamelCase__ )
@flax.struct.dataclass
class UpperCamelCase_ :
    """Common derived quantities (betas / alphas / cumulative product) shared
    by flax schedulers.

    NOTE(review): the three fields were all mangled to ``snake_case__``;
    names restored from the ``cls(alphas=..., betas=..., alphas_cumprod=...)``
    constructor call below and the ``state.alphas_cumprod`` read elsewhere in
    this file. The factory is named ``create`` per the diffusers reference —
    confirm against upstream.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Derive the state from a scheduler's config (beta schedule + dtype)."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""")

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Gather sqrt(alpha_cumprod) and sqrt(1 - alpha_cumprod) at ``timesteps``
    and broadcast both to ``original_samples``' shape.

    Name restored from the calls in the noise/velocity helpers below; the
    original declared four identical parameter names (a SyntaxError).
    ``noise`` is accepted (and unused) to keep the upstream signature.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse: mix clean samples with noise at the given timesteps.

    NOTE(review): name restored per the diffusers flax scheduler utilities
    (no call site is visible in this chunk) — confirm against upstream.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """Compute the v-prediction target for the given samples/noise/timesteps.

    NOTE(review): name restored per the diffusers flax scheduler utilities
    (no call site is visible in this chunk) — confirm against upstream.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 720
|
"""simple docstring"""
from __future__ import annotations
import math
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int ) -> None:
__SCREAMING_SNAKE_CASE = size
# approximate the overall size of segment tree with given value
__SCREAMING_SNAKE_CASE = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
__SCREAMING_SNAKE_CASE = [0 for i in range(0 , 4 * size )]
__SCREAMING_SNAKE_CASE = [0 for i in range(0 , 4 * size )] # flag for lazy update
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : int ) -> int:
return idx * 2
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int ) -> int:
return idx * 2 + 1
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[int] ) -> None:
if left_element == right_element:
__SCREAMING_SNAKE_CASE = a[left_element - 1]
else:
__SCREAMING_SNAKE_CASE = (left_element + right_element) // 2
self.build(self.left(UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.build(self.right(UpperCAmelCase__ ) , mid + 1 , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = max(
self.segment_tree[self.left(UpperCAmelCase__ )] , self.segment_tree[self.right(UpperCAmelCase__ )] )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool:
if self.flag[idx] is True:
__SCREAMING_SNAKE_CASE = self.lazy[idx]
__SCREAMING_SNAKE_CASE = False
if left_element != right_element:
__SCREAMING_SNAKE_CASE = self.lazy[idx]
__SCREAMING_SNAKE_CASE = self.lazy[idx]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__SCREAMING_SNAKE_CASE = val
if left_element != right_element:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
return True
__SCREAMING_SNAKE_CASE = (left_element + right_element) // 2
self.update(self.left(UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.update(self.right(UpperCAmelCase__ ) , mid + 1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = max(
self.segment_tree[self.left(UpperCAmelCase__ )] , self.segment_tree[self.right(UpperCAmelCase__ )] )
return True
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int | float:
if self.flag[idx] is True:
__SCREAMING_SNAKE_CASE = self.lazy[idx]
__SCREAMING_SNAKE_CASE = False
if left_element != right_element:
__SCREAMING_SNAKE_CASE = self.lazy[idx]
__SCREAMING_SNAKE_CASE = self.lazy[idx]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__SCREAMING_SNAKE_CASE = (left_element + right_element) // 2
__SCREAMING_SNAKE_CASE = self.query(self.left(UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.query(self.right(UpperCAmelCase__ ) , mid + 1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return max(UpperCAmelCase__ , UpperCAmelCase__ )
def __str__( self : int ) -> str:
return str([self.query(1 , 1 , self.size , UpperCAmelCase__ , UpperCAmelCase__ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
a__ : Tuple = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
a__ : Dict = 1_5
a__ : int = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
| 553
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCamelCase__ = logging.get_logger(__name__)
# Map from ONNX Runtime tensor type strings to numpy dtypes.
# Fixed: the mangled values (`np.inta`, `np.intaa`, ...) do not exist in numpy
# and collapsed distinct widths (int16/int32/int64) onto one name.
UpperCamelCase__ = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class __snake_case :
    """Thin wrapper around an onnxruntime InferenceSession with
    save/load_pretrained support.

    NOTE(review): restored from a name-mangled original — every ``self``
    attribute write was lost to a throwaway local (later code reads
    ``self.model`` / ``self.model_save_dir`` / ``self.latest_model_name``),
    and all five methods shared the name ``a``. Method names are grounded by
    the ``OnnxRuntimeModel.load_model`` / ``cls._from_pretrained`` /
    ``self._save_pretrained`` call sites; the class itself appears renamed
    (upstream ``OnnxRuntimeModel``) — reconcile before running.
    """

    def __init__(self, model=None, **kwargs):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None)
        # NOTE(review): the default here was mangled; upstream uses
        # ONNX_WEIGHTS_NAME — confirm.
        self.latest_model_name = kwargs.get("""latest_model_name""", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # Convert every input to a numpy array; output_names=None returns all.
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an InferenceSession, defaulting to the CPU provider."""
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the model file (and external weights, if any) to `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Validate the target directory and delegate to `_save_pretrained`."""
        if os.path.isfile(save_directory):
            logger.error(F'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None,
                         file_name=None, provider=None, sess_options=None, **kwargs):
        """Load from a local directory or download from the hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, cache_dir=None, use_auth_token=None, **model_kwargs):
        """Public entry point; supports the `repo@revision` id syntax."""
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            model_id, revision = model_id.split("""@""")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir,
            force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
| 268
|
from __future__ import annotations
class BoyerMooreSearch:
    """
    Boyer-Moore string search using only the bad-character heuristic.

    Attributes:
        text, pattern: the haystack and needle strings.
        textLen, patLen: their cached lengths.
    """

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the right-most index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        Return the text index of the right-most mismatch when the pattern is
        aligned at ``current_pos``, or -1 if the whole window matches.
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return every alignment at which the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # Classic bad-character shift. The scan above still advances one
                # position at a time, so this value is informational only.
                _shift = mismatch_index - match_index  # shifting index lgtm [py/multiple-definition]
        return positions
if __name__ == "__main__":
    # Demo: search for a short pattern and report every match position.
    text = "ABAABA"
    pattern = "AB"
    bms = BoyerMooreSearch(text, pattern)
    positions = bms.bad_character_heuristic()
    if not positions:
        print("No match found")
    else:
        print("Pattern found in following positions: ")
        print(positions)
| 268
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
# NOTE(review): identifiers in this class appear machine-mangled -- the base
# classes `UpperCAmelCase` are presumably ModelTesterMixin / UNetTesterMixin
# (imported above), every local is bound to the same name `_lowerCAmelCase`,
# and the code then reads back the original names (e.g. `batch_size`, `model`),
# which no longer exist as written. Structure matches a diffusers AutoencoderKL
# unit-test class; restore from the upstream test file before running.
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    '''simple docstring'''
    # Model class under test, the name of its main input tensor, and a tolerance.
    SCREAMING_SNAKE_CASE_: Tuple = AutoencoderKL
    SCREAMING_SNAKE_CASE_: Tuple = "sample"
    SCREAMING_SNAKE_CASE_: Tuple = 1e-2
    @property
    def __lowerCamelCase ( self : List[Any] ) -> int:
        """simple docstring"""
        # Dummy input: a random (4, 3, 32, 32) batch moved to the test device.
        _lowerCAmelCase = 4
        _lowerCAmelCase = 3
        _lowerCAmelCase = (32, 32)
        # NOTE(review): `batch_size`, `num_channels`, `sizes` are unbound here
        # because the three assignments above were mangled to one name.
        _lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
        return {"sample": image}
    @property
    def __lowerCamelCase ( self : List[str] ) -> Dict:
        """simple docstring"""
        # Expected input shape (C, H, W).
        return (3, 32, 32)
    @property
    def __lowerCamelCase ( self : int ) -> Optional[int]:
        """simple docstring"""
        # Expected output shape (C, H, W).
        return (3, 32, 32)
    def __lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        # Minimal AutoencoderKL constructor kwargs plus the dummy input dict.
        _lowerCAmelCase = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        _lowerCAmelCase = self.dummy_input
        return init_dict, inputs_dict
    def __lowerCamelCase ( self : List[str] ) -> str:
        """simple docstring"""
        # Intentionally skipped in the upstream test suite.
        pass
    def __lowerCamelCase ( self : Any ) -> List[Any]:
        """simple docstring"""
        # Intentionally skipped in the upstream test suite.
        pass
    @unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
    def __lowerCamelCase ( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        # Train the model once without and once with gradient checkpointing and
        # verify that losses and parameter gradients agree.
        _lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common()
        _lowerCAmelCase = self.model_class(**UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        assert not model.is_gradient_checkpointing and model.training
        _lowerCAmelCase = model(**UpperCAmelCase_ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        _lowerCAmelCase = torch.randn_like(UpperCAmelCase_ )
        _lowerCAmelCase = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        _lowerCAmelCase = self.model_class(**UpperCAmelCase_ )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(UpperCAmelCase_ )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        _lowerCAmelCase = model_a(**UpperCAmelCase_ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        _lowerCAmelCase = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        _lowerCAmelCase = dict(model.named_parameters() )
        _lowerCAmelCase = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def __lowerCamelCase ( self : int ) -> Optional[Any]:
        """simple docstring"""
        # Loading a pretrained dummy checkpoint must report no missing keys and
        # produce a non-None output.
        _lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=UpperCAmelCase_ )
        self.assertIsNotNone(UpperCAmelCase_ )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(UpperCAmelCase_ )
        _lowerCAmelCase = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def __lowerCamelCase ( self : Optional[Any] ) -> List[str]:
        """simple docstring"""
        # Seeded forward pass compared against per-device golden output slices.
        _lowerCAmelCase = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
        _lowerCAmelCase = model.to(UpperCAmelCase_ )
        model.eval()
        # MPS requires the generator to live on CPU.
        if torch_device == "mps":
            _lowerCAmelCase = torch.manual_seed(0 )
        else:
            _lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
        _lowerCAmelCase = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        _lowerCAmelCase = image.to(UpperCAmelCase_ )
        with torch.no_grad():
            _lowerCAmelCase = model(UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ , generator=UpperCAmelCase_ ).sample
        _lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            _lowerCAmelCase = torch.tensor(
                [
                    -4.0_0_7_8E-0_1,
                    -3.8_3_2_3E-0_4,
                    -1.2_6_8_1E-0_1,
                    -1.1_4_6_2E-0_1,
                    2.0_0_9_5E-0_1,
                    1.0_8_9_3E-0_1,
                    -8.8_2_4_7E-0_2,
                    -3.0_3_6_1E-0_1,
                    -9.8_6_4_4E-0_3,
                ] )
        elif torch_device == "cpu":
            _lowerCAmelCase = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            _lowerCAmelCase = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-2 ) )
@slow
# NOTE(review): identifiers here are machine-mangled in the same way as the
# class above (locals bound to `_lowerCAmelCase` then read back by original
# names such as `model`, `image`, `sample`). This matches a diffusers
# AutoencoderKL slow/integration test class comparing decoded slices against
# golden values; restore from upstream before running.
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''
    def __lowerCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        # File name for a cached seeded-noise fixture of the given shape.
        return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase_ ) for s in shape] )}.npy"""
    def __lowerCamelCase ( self : List[str] ) -> Any:
        """simple docstring"""
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __lowerCamelCase ( self : List[Any] , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Union[str, Any]=(4, 3, 512, 512) , UpperCAmelCase_ : Any=False ) -> Optional[int]:
        """simple docstring"""
        # Load a seeded image fixture from the Hub, optionally in fp16.
        _lowerCAmelCase = torch.floataa if fpaa else torch.floataa
        _lowerCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase_ , UpperCAmelCase_ ) ) ).to(UpperCAmelCase_ ).to(UpperCAmelCase_ )
        return image
    def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[int]="CompVis/stable-diffusion-v1-4" , UpperCAmelCase_ : str=False ) -> int:
        """simple docstring"""
        # Load the SD VAE submodel in eval mode, optionally the fp16 revision.
        _lowerCAmelCase = 'fp16' if fpaa else None
        _lowerCAmelCase = torch.floataa if fpaa else torch.floataa
        _lowerCAmelCase = AutoencoderKL.from_pretrained(
            UpperCAmelCase_ , subfolder='vae' , torch_dtype=UpperCAmelCase_ , revision=UpperCAmelCase_ , )
        model.to(UpperCAmelCase_ ).eval()
        return model
    def __lowerCamelCase ( self : int , UpperCAmelCase_ : Optional[int]=0 ) -> List[Any]:
        """simple docstring"""
        # Deterministic generator; MPS requires a CPU-seeded generator.
        if torch_device == "mps":
            return torch.manual_seed(UpperCAmelCase_ )
        return torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def __lowerCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ) -> Dict:
        """simple docstring"""
        # Full-model stochastic forward pass vs golden slices (CPU/GPU vs MPS).
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(UpperCAmelCase_ )
        _lowerCAmelCase = self.get_generator(UpperCAmelCase_ )
        with torch.no_grad():
            _lowerCAmelCase = model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ ).sample
        assert sample.shape == image.shape
        _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __lowerCamelCase ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ) -> List[Any]:
        """simple docstring"""
        # Same as above but in fp16 (GPU only), with a looser tolerance.
        _lowerCAmelCase = self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
        _lowerCAmelCase = self.get_sd_image(UpperCAmelCase_ , fpaa=UpperCAmelCase_ )
        _lowerCAmelCase = self.get_generator(UpperCAmelCase_ )
        with torch.no_grad():
            _lowerCAmelCase = model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ ).sample
        assert sample.shape == image.shape
        _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        _lowerCAmelCase = torch.tensor(UpperCAmelCase_ )
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ) -> Any:
        """simple docstring"""
        # Deterministic (mode) forward pass -- no sampling generator.
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(UpperCAmelCase_ )
        with torch.no_grad():
            _lowerCAmelCase = model(UpperCAmelCase_ ).sample
        assert sample.shape == image.shape
        _lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        _lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __lowerCamelCase ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        # Decoder-only pass: latents (3, 4, 64, 64) -> images (3, 3, 512, 512).
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            _lowerCAmelCase = model.decode(UpperCAmelCase_ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
        _lowerCAmelCase = torch.tensor(UpperCAmelCase_ )
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ) -> Any:
        """simple docstring"""
        # Decoder-only pass in fp16 with a correspondingly looser tolerance.
        _lowerCAmelCase = self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
        _lowerCAmelCase = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase_ )
        with torch.no_grad():
            _lowerCAmelCase = model.decode(UpperCAmelCase_ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        _lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        _lowerCAmelCase = torch.tensor(UpperCAmelCase_ )
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=5E-3 )
    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def __lowerCamelCase ( self : int , UpperCAmelCase_ : Tuple ) -> List[str]:
        """simple docstring"""
        # fp16 decode with and without xformers attention must agree closely.
        _lowerCAmelCase = self.get_sd_vae_model(fpaa=UpperCAmelCase_ )
        _lowerCAmelCase = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase_ )
        with torch.no_grad():
            _lowerCAmelCase = model.decode(UpperCAmelCase_ ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            _lowerCAmelCase = model.decode(UpperCAmelCase_ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-1 )
    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def __lowerCamelCase ( self : Any , UpperCAmelCase_ : int ) -> List[Any]:
        """simple docstring"""
        # fp32 decode with and without xformers attention must agree closely.
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            _lowerCAmelCase = model.decode(UpperCAmelCase_ ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            _lowerCAmelCase = model.decode(UpperCAmelCase_ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def __lowerCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        # Encoder pass: sampled latents must match the 8x-downscaled shape and
        # the golden slice (looser tolerance on MPS).
        _lowerCAmelCase = self.get_sd_vae_model()
        _lowerCAmelCase = self.get_sd_image(UpperCAmelCase_ )
        _lowerCAmelCase = self.get_generator(UpperCAmelCase_ )
        with torch.no_grad():
            _lowerCAmelCase = model.encode(UpperCAmelCase_ ).latent_dist
            _lowerCAmelCase = dist.sample(generator=UpperCAmelCase_ )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        _lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
        _lowerCAmelCase = torch.tensor(UpperCAmelCase_ )
        _lowerCAmelCase = 3E-3 if torch_device != 'mps' else 1E-2
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=UpperCAmelCase_ )
| 491
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config files.
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """
    Configuration class for a GPT-NeoX-Japanese model.

    Stores hyper-parameters such as vocabulary size, hidden size and rotary
    embedding settings; everything else is delegated to `PretrainedConfig`.
    """

    # Identifier used by AutoConfig to route to this config class.
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # The intermediate (MLP) size is expressed as a multiple of hidden_size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Fraction of head dimensions that receive rotary embeddings, and the base.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 491
| 1
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# Relative frequency (percent) of each letter in typical English text.
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
# English letters ordered from most to least frequent.
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict:
    """
    Return a dict mapping each uppercase letter A-Z to its number of
    occurrences in *message* (case-insensitive; non-letters are ignored).
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in letter_count:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x):
    """Sort key helper: return the first element of a pair (e.g. its frequency)."""
    return x[0]
def get_frequency_order(message: str) -> str:
    """
    Return the 26 letters ordered from most to least frequent in *message*,
    breaking frequency ties by reverse ETAOIN order (the classic
    frequency-analysis ordering used in cryptanalysis).
    """
    letter_to_freq = get_letter_count(message)
    # Invert the count map: frequency -> letters occurring that many times.
    freq_to_letter = {freq: [] for freq in letter_to_freq.values()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    # Within each frequency bucket, sort by reverse ETAOIN position and
    # collapse the bucket into a single string.
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    # Concatenate the buckets, highest frequency first.
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    """
    Score 0-12: how many of the six most and six least frequent letters of
    *message* coincide with English's most/least frequent letters (the two
    ends of ETAOIN).
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
# Run any embedded doctests when the module is executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 260
|
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """
    Output of the scheduler's `step` call.

    Attributes:
        prev_sample: the computed sample for the next diffusion step (used as
            the model input in the denoising loop).
        pred_original_sample: optional predicted fully-denoised sample (x_0),
            useful for progress preview or guidance.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which
    defines the cumulative product of (1 - beta) over time.

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: cap on each beta; values below 1 avoid singularities.
        alpha_transform_type: "cosine" (default) or "exp" noise schedule.

    Returns:
        A 1-D float32 torch tensor of betas of length `num_diffusion_timesteps`.

    Raises:
        ValueError: if `alpha_transform_type` is not supported.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t2) / alpha_bar(t1), clipped at max_beta.
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
# NOTE(review): this class appears to be a mangled copy of diffusers'
# DDIMInverseScheduler (bases `_lowerCamelCase` presumably SchedulerMixin and
# ConfigMixin). The mangling introduced real defects: every `__init__`/method
# parameter is named `__a` (duplicate argument names -> SyntaxError), locals
# are all bound to `A__` while the code reads back the original names
# (`kwargs`, `trained_betas`, `num_inference_steps`, ...), and dtype names are
# corrupted (`torch.floataa`, `np.intaa`). Restore from upstream before use.
class snake_case_ ( _lowerCamelCase , _lowerCamelCase ):
    """simple docstring"""
    # Solver order of the scheduler (presumably the `order` attribute).
    SCREAMING_SNAKE_CASE_: List[Any] = 1
    @register_to_config
    def __init__( self , __a = 1000 , __a = 0.0001 , __a = 0.02 , __a = "linear" , __a = None , __a = True , __a = True , __a = 0 , __a = "epsilon" , __a = 1.0 , **__a , ):
        """simple docstring"""
        # Legacy kwarg `set_alpha_to_one` is accepted but deprecated in favor
        # of `set_alpha_to_zero`.
        if kwargs.get('set_alpha_to_one' , __a ) is not None:
            A__ = (
                'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
            )
            deprecate('set_alpha_to_one' , '1.0.0' , __a , standard_warn=__a )
            A__ = kwargs['set_alpha_to_one']
        # Build the beta schedule: explicit tensor, linear, scaled-linear, or
        # squared-cosine; anything else is unsupported.
        if trained_betas is not None:
            A__ = torch.tensor(__a , dtype=torch.floataa )
        elif beta_schedule == "linear":
            A__ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            A__ = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            A__ = betas_for_alpha_bar(__a )
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
        A__ = 1.0 - self.betas
        A__ = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        A__ = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        A__ = 1.0
        # setable values
        A__ = None
        A__ = torch.from_numpy(np.arange(0 , __a ).copy().astype(np.intaa ) )
    def _UpperCAmelCase ( self , __a , __a = None ):
        """simple docstring"""
        # Input scaling is a no-op for this scheduler.
        return sample
    def _UpperCAmelCase ( self , __a , __a = None ):
        """simple docstring"""
        # Presumably `set_timesteps`: build the evenly spaced inference
        # timestep schedule (may not exceed the training timestep count).
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''' )
        A__ = num_inference_steps
        A__ = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        A__ = (np.arange(0 , __a ) * step_ratio).round().copy().astype(np.intaa )
        A__ = torch.from_numpy(__a ).to(__a )
        self.timesteps += self.config.steps_offset
    def _UpperCAmelCase ( self , __a , __a , __a , __a = 0.0 , __a = False , __a = None , __a = True , ):
        """simple docstring"""
        # Presumably `step`: one inverted-DDIM update from `timestep` to the
        # NEXT (larger) timestep, per eq. (12) of https://arxiv.org/pdf/2010.02502.pdf.
        A__ = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        A__ = self.alphas_cumprod[timestep]
        A__ = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        A__ = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            A__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            A__ = model_output
        elif self.config.prediction_type == "sample":
            A__ = model_output
            A__ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            A__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            A__ = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ' `v_prediction`' )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            A__ = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        A__ = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        A__ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=__a , pred_original_sample=__a )
    def __len__( self ):
        """simple docstring"""
        # Length of the scheduler == number of training timesteps.
        return self.config.num_train_timesteps
| 260
| 1
|
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
# Name of the T5X/gin model variant that includes context conditioning.
_UpperCAmelCase : List[str] = """base_with_context"""
# NOTE(review): this looks like the "load notes encoder" weight-porting helper
# from the music-spectrogram-diffusion conversion script. The mangling made the
# two parameters share a name (SyntaxError) and replaced every assignment
# target (`model.<attr> = ...`) with the same local `lowerCAmelCase__`, so the
# converted tensors are discarded. Restore targets from the original script.
def lowerCAmelCase_ (lowercase__ : List[str] , lowercase__ : Any ) -> List[str]:
    '''simple docstring'''
    # Token and (frozen) positional embeddings.
    lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
    lowerCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=snake_case_ )
    # Port each encoder layer: attention layer-norm, Q/K/V/O projections, then
    # the gated MLP (wi_0, wi_1, wo) and its layer-norm. JAX kernels are
    # transposed to match PyTorch's (out, in) weight layout.
    for lyr_num, lyr in enumerate(model.encoders ):
        lowerCAmelCase__ = weights[f'layers_{lyr_num}']
        lowerCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        lowerCAmelCase__ = ly_weight["attention"]
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    # Final encoder layer-norm.
    lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
# NOTE(review): presumably the "load continuous (context) encoder" porting
# helper of the same conversion script. Same mangling defects as above:
# duplicate parameter names and assignment targets collapsed into
# `lowerCAmelCase__`, so the ported tensors are dropped.
def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : str ) -> List[str]:
    '''simple docstring'''
    # Input projection and (frozen) positional embeddings.
    lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
    lowerCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=snake_case_ )
    # Port each encoder layer: attention Q/K/V/O, its layer-norm, then the
    # gated MLP and its layer-norm (JAX kernels transposed for PyTorch).
    for lyr_num, lyr in enumerate(model.encoders ):
        lowerCAmelCase__ = weights[f'layers_{lyr_num}']
        lowerCAmelCase__ = ly_weight["attention"]
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
    # Final encoder layer-norm.
    lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
# NOTE(review): presumably the "load decoder" (T5-FiLM decoder) porting helper
# of the same conversion script. Same mangling defects: duplicate parameter
# names and assignment targets collapsed into `lowerCAmelCase__`.
def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> str:
    '''simple docstring'''
    # Timestep-embedding MLP, frozen positional embeddings, and the projection
    # of the continuous (spectrogram) inputs.
    lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
    lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
    lowerCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=snake_case_ )
    lowerCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
    # Port each decoder layer: self-attention (with its FiLM modulation),
    # cross-attention, and the gated MLP with its FiLM modulation.
    for lyr_num, lyr in enumerate(model.decoders ):
        lowerCAmelCase__ = weights[f'layers_{lyr_num}']
        lowerCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
        lowerCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        lowerCAmelCase__ = ly_weight["self_attention"]
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        # Cross-attention block.
        lowerCAmelCase__ = ly_weight["MultiHeadDotProductAttention_0"]
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        lowerCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    # Final decoder layer-norm and the spectrogram output projection.
    lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
    lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
    return model
def lowerCAmelCase_ (args ) -> None:
    """Convert a T5X music-spectrogram-diffusion checkpoint into a diffusers
    SpectrogramDiffusionPipeline and optionally save it.

    Args:
        args: parsed CLI namespace with ``checkpoint_path``, ``save`` and
            ``output_path`` attributes.
    """
    # Load the raw T5X checkpoint and convert every pytree leaf to numpy.
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
    # Build the three PyTorch sub-models with shapes taken from the Flax config.
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    # Copy the Flax weights into the freshly constructed PyTorch modules.
    notes_encoder = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['''target''']['''decoder'''] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    # NOTE(review): ``type=bool`` on an argparse flag treats any non-empty
    # string as True; kept as-is to preserve the existing CLI behavior.
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=F'''{MODEL}/checkpoint_500000''',
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()
    # ``main`` was auto-renamed to ``lowerCAmelCase_`` above; call that entry point.
    lowerCAmelCase_(args)
| 703
|
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Reflect a ray once inside the ellipse 4x^2 + y^2 = 100.

    Given the current contact point and the gradient of the incoming ray,
    return ``(next_x, next_y, outgoing_gradient)`` for the next contact point
    (Project Euler 144 geometry).
    """
    # Gradient of the normal at the contact point of the ellipse.
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) of the normal angle: mirroring the incoming
    # ray about the normal is a rotation by twice that angle.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6 ) -> int:
    """Count the reflections of the laser beam inside the white cell.

    The beam enters at (0.0, 10.1), first strikes (first_x_coord,
    first_y_coord), and bounces until it exits through the gap
    -0.01 <= x <= 0.01 at the top (Project Euler 144; answer 354).
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # Gradient of the entry ray from (0.0, 10.1) to the first contact point.
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient )
        num_reflections += 1
    return num_reflections


# The automated rename had bound both functions to ``lowerCAmelCase_`` (the
# second shadowing the first); keep a module-level alias for the entry point.
lowerCAmelCase_ = solution

if __name__ == "__main__":
    print(F'''{solution() = }''')
| 288
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
    # NOTE(review): an automated rename damaged this suite — every method is
    # named ``UpperCAmelCase`` (later defs shadow earlier ones, so unittest
    # cannot discover them), and assignment targets were collapsed to
    # ``_UpperCamelCase``, so attributes such as ``self.tmpdirname`` and
    # ``self.vocab_file`` read below are never actually set. Code untouched;
    # documentation only.
    def UpperCAmelCase ( self) -> List[str]:
        '''Write a toy BERT vocab and a ChineseCLIP image-processor config into a temp dir.'''
        _UpperCamelCase = tempfile.mkdtemp()
        _UpperCamelCase = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''的''',
            '''价''',
            '''格''',
            '''是''',
            '''15''',
            '''便''',
            '''alex''',
            '''##andra''',
            ''',''',
            '''。''',
            '''-''',
            '''t''',
            '''shirt''',
        ]
        _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        _UpperCamelCase = {
            '''do_resize''': True,
            '''size''': {'''height''': 2_24, '''width''': 2_24},
            '''do_center_crop''': True,
            '''crop_size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
            '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
            '''do_convert_rgb''': True,
        }
        # NOTE(review): ``__a`` is undefined here — presumably FEATURE_EXTRACTOR_NAME.
        _UpperCamelCase = os.path.join(self.tmpdirname , __a)
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
            json.dump(__a , __a)
    def UpperCAmelCase ( self , **__a) -> List[Any]:
        '''Build the slow BertTokenizer from the temp-dir vocab.'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a)
    def UpperCAmelCase ( self , **__a) -> Any:
        '''Build the fast (Rust) BertTokenizerFast from the temp-dir vocab.'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a)
    def UpperCAmelCase ( self , **__a) -> List[str]:
        '''Build a ChineseCLIPImageProcessor from the saved config file.'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a)
    def UpperCAmelCase ( self) -> Optional[Any]:
        '''Remove the temp dir created for the fixtures.'''
        shutil.rmtree(self.tmpdirname)
    def UpperCAmelCase ( self) -> Dict:
        '''Return a list with one random PIL image (moveaxis puts channels last).'''
        _UpperCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
        _UpperCamelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs]
        return image_inputs
    def UpperCAmelCase ( self) -> Optional[Any]:
        '''save_pretrained/from_pretrained round-trips with both slow and fast tokenizers.'''
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = self.get_rust_tokenizer()
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
        processor_slow.save_pretrained(self.tmpdirname)
        _UpperCamelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a)
        _UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
        processor_fast.save_pretrained(self.tmpdirname)
        _UpperCamelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer , __a)
        self.assertIsInstance(processor_fast.tokenizer , __a)
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor , __a)
        self.assertIsInstance(processor_fast.image_processor , __a)
    def UpperCAmelCase ( self) -> Tuple:
        '''from_pretrained with override kwargs picks up new tokenizer/image-processor settings.'''
        _UpperCamelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        _UpperCamelCase = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
        _UpperCamelCase = self.get_image_processor(do_normalize=__a)
        _UpperCamelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=__a)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , __a)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , __a)
    def UpperCAmelCase ( self) -> List[str]:
        '''The processor image path matches the bare image processor output.'''
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = image_processor(__a , return_tensors='''np''')
        _UpperCamelCase = processor(images=__a , return_tensors='''np''')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
    def UpperCAmelCase ( self) -> int:
        '''The processor text path matches the bare tokenizer output.'''
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
        _UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
        _UpperCamelCase = processor(text=__a)
        _UpperCamelCase = tokenizer(__a)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def UpperCAmelCase ( self) -> Dict:
        '''Joint text+image call yields the expected keys; a bare call raises.'''
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
        _UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = processor(text=__a , images=__a)
        self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
        # test if it raises when no input is passed
        with pytest.raises(__a):
            processor()
    def UpperCAmelCase ( self) -> Dict:
        '''batch_decode is forwarded straight to the tokenizer.'''
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
        _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _UpperCamelCase = processor.batch_decode(__a)
        _UpperCamelCase = tokenizer.batch_decode(__a)
        self.assertListEqual(__a , __a)
    def UpperCAmelCase ( self) -> Optional[int]:
        '''Output keys follow processor.model_input_names.'''
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
        _UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = processor(text=__a , images=__a)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 19
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase__ ( cp ) -> bool:
    """Return ``True`` if the Unicode code point *cp* lies in a CJK block.

    Covers the CJK Unified Ideograph blocks (and extensions A-E) plus the
    compatibility ideograph ranges — the same check BERT uses for
    whole-word masking of Chinese text.
    """
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Extension A
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  # CJK Extension B
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  # CJK Extension C
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  # CJK Extension D
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  # CJK Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  # CJK Compatibility Supplement
    ):
        return True
    return False
def lowerCamelCase__ ( word ) -> int:
    """Return 1 if every character of *word* is a CJK character, else 0."""
    for char in word:
        # Check each character's code point individually.
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def lowerCamelCase__ ( tokens ) -> List[str]:
    """Collect the multi-character, all-Chinese tokens from *tokens*, deduplicated."""
    word_set = set()
    for token in tokens:
        # Only segments longer than one char made purely of CJK chars count.
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def lowerCamelCase__ ( bert_tokens, chinese_word_set ) -> List[str]:
    """Prefix BERT subword tokens with '##' where they continue a Chinese word.

    For every maximal match of a word from *chinese_word_set* starting at a
    Chinese character, all but the first matched token get a '##' prefix
    (whole-word-masking convention). Mutates and returns *bert_tokens*.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            # Try the longest window first so the longest dictionary word wins.
            window = min(end - start, max_word_len )
            for i in range(window, 1, -1 ):
                whole_word = ''''''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i ):
                        bert_word[j] = '''##''' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def lowerCamelCase__ ( lines, ltp_tokenizer, bert_tokenizer ) -> List[List[int]]:
    """Compute whole-word-masking reference positions for each input line.

    Args:
        lines: raw text lines to process.
        ltp_tokenizer: an ``LTP`` pipeline used for Chinese word segmentation.
        bert_tokenizer: a ``BertTokenizer`` producing the subword ids.

    Returns:
        One list per line, holding the indices of '##'-prefixed single-char
        Chinese subword tokens (i.e. continuations of a segmented word).
    """
    # Segment every line into Chinese words, 100 lines per batch.
    ltp_res = []
    for i in range(0, len(lines ), 1_00 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    # Tokenize the same lines with BERT, batched identically.
    bert_res = []
    for i in range(0, len(lines ), 1_00 ):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12 )
        bert_res.extend(res['''input_ids'''] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens, chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def lowerCamelCase__ ( args ) -> None:
    """Read ``args.file_name``, compute whole-word-mask refs, write one JSON list per line."""
    with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
        data = f.readlines()
    # Drop empty / whitespace-only lines to avoid delimiters like '\u2029'.
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer )
    with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
        payload = [json.dumps(ref ) + '''\n''' for ref in ref_ids]
        f.writelines(payload )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
    parser.add_argument(
        """--file_name""",
        required=False,
        type=str,
        default="""./resources/chinese-demo.txt""",
        help="""file need process, same as training data in lm""",
    )
    parser.add_argument(
        """--ltp""",
        required=False,
        type=str,
        default="""./resources/ltp""",
        help="""resources for LTP tokenizer, usually a path""",
    )
    parser.add_argument(
        """--bert""",
        required=False,
        type=str,
        default="""./resources/robert""",
        help="""resources for Bert tokenizer""",
    )
    parser.add_argument(
        """--save_path""",
        required=False,
        type=str,
        default="""./resources/ref.txt""",
        help="""path to save res""",
    )
    args = parser.parse_args()
    # ``main`` was auto-renamed; the last ``lowerCamelCase__`` binding above is the entry point.
    lowerCamelCase__(args)
| 19
| 1
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
# Bind the module logger under ``logger`` — the name the class below actually
# logs through — while keeping the original auto-generated alias.
logger = logging.getLogger(__name__)
__UpperCAmelCase = logger
class a__ ( a__ ):
    # NOTE(review): automated renaming damaged this class — it "inherits" from
    # its own (not yet bound) name (upstream this is GLUETransformer(BaseTransformer)),
    # several methods share the name ``__SCREAMING_SNAKE_CASE`` so later defs
    # shadow earlier ones, most assignment targets were collapsed to
    # ``lowerCAmelCase__``, and one signature repeats a parameter name (a
    # SyntaxError). Code left byte-identical; documentation only.
    '''Lightning transformer fine-tuned for GLUE sequence-classification tasks.'''
    lowercase__ : List[str] = "sequence-classification"
    def __init__( self , lowerCamelCase_ ) -> List[Any]:
        # Accept either a Namespace or a plain dict of hyper-parameters.
        if type(lowerCamelCase_ ) == dict:
            lowerCAmelCase__ = Namespace(**lowerCamelCase_ )
        # presumably these set the task output mode and label count — the
        # targets (and the ``hparams`` name) were lost in the rename; confirm
        # against the upstream example before running.
        lowerCAmelCase__ = glue_output_modes[hparams.task]
        lowerCAmelCase__ = glue_tasks_num_labels[hparams.task]
        super().__init__(lowerCamelCase_ , lowerCamelCase_ , self.mode )
    def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> List[Any]:
        '''Forward pass: delegate straight to the wrapped Hugging Face model.'''
        return self.model(**lowerCamelCase_ )
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
        '''Training step: assemble model inputs from the batch; return loss and lr log.'''
        lowerCAmelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # token_type_ids are only consumed by these model families.
            lowerCAmelCase__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        lowerCAmelCase__ = self(**lowerCamelCase_ )
        lowerCAmelCase__ = outputs[0]
        lowerCAmelCase__ = self.trainer.lr_schedulers[0]['''scheduler''']
        lowerCAmelCase__ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        '''Create (or reuse cached) train/dev feature files for the configured GLUE task.'''
        lowerCAmelCase__ = self.hparams
        lowerCAmelCase__ = processors[args.task]()
        lowerCAmelCase__ = processor.get_labels()
        for mode in ["train", "dev"]:
            lowerCAmelCase__ = self._feature_file(lowerCamelCase_ )
            if os.path.exists(lowerCamelCase_ ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , lowerCamelCase_ )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                lowerCAmelCase__ = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir )
                )
                lowerCAmelCase__ = convert_examples_to_features(
                    lowerCamelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('''Saving features into cached file %s''' , lowerCamelCase_ )
                torch.save(lowerCamelCase_ , lowerCamelCase_ )
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False ) -> DataLoader:
        '''Load the cached features for *mode* and wrap them in a DataLoader.'''
        lowerCAmelCase__ = '''dev''' if mode == '''test''' else mode
        lowerCAmelCase__ = self._feature_file(lowerCamelCase_ )
        logger.info('''Loading features from cached file %s''' , lowerCamelCase_ )
        lowerCAmelCase__ = torch.load(lowerCamelCase_ )
        lowerCAmelCase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        lowerCAmelCase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        lowerCAmelCase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            lowerCAmelCase__ = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            lowerCAmelCase__ = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , batch_size=lowerCamelCase_ , shuffle=lowerCamelCase_ , )
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
        '''Validation step: forward pass, collect loss, logits and gold labels.'''
        lowerCAmelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            lowerCAmelCase__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        lowerCAmelCase__ = self(**lowerCamelCase_ )
        lowerCAmelCase__ , lowerCAmelCase__ = outputs[:2]
        lowerCAmelCase__ = logits.detach().cpu().numpy()
        lowerCAmelCase__ = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> tuple:
        '''Aggregate step outputs into mean loss, GLUE metrics, and per-example lists.'''
        lowerCAmelCase__ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
        lowerCAmelCase__ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            lowerCAmelCase__ = np.argmax(lowerCamelCase_ , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            lowerCAmelCase__ = np.squeeze(lowerCamelCase_ )
        lowerCAmelCase__ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        lowerCAmelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
        lowerCAmelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
        lowerCAmelCase__ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , lowerCamelCase_ , lowerCamelCase_ )}
        lowerCAmelCase__ = dict(results.items() )
        lowerCAmelCase__ = results
        return ret, preds_list, out_label_list
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> dict:
        '''Validation epoch end: surface the aggregated metrics for logging.'''
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._eval_end(lowerCamelCase_ )
        lowerCAmelCase__ = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> dict:
        '''Test epoch end: same aggregation, reported under ``avg_test_loss``.'''
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._eval_end(lowerCamelCase_ )
        lowerCAmelCase__ = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
        '''Extend the base argument parser with GLUE-specific CLI options.'''
        BaseTransformer.add_model_specific_args(lowerCamelCase_ , lowerCamelCase_ )
        parser.add_argument(
            '''--max_seq_length''' , default=1_28 , type=lowerCamelCase_ , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--task''' , default='''''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''The GLUE task to run''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=lowerCamelCase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser
def _snake_case ( ) -> List[Any]:
    """CLI entry point: build the argument parser, train, and optionally test.

    Returns the trainer's test results when ``--do_predict`` is set, else None.
    """
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    # ``a__`` is this file's (auto-renamed) GLUE transformer class.
    parser = a__.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            '''./results''' , F"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""" , )
        os.makedirs(args.output_dir )
    model = a__(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
    # ``main`` was auto-renamed to ``_snake_case`` above; call that entry point.
    _snake_case()
| 98
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# presumably a debug/visualization switch for the tests below — TODO confirm;
# it is assigned here but never read within this chunk.
__UpperCAmelCase = False
class a__ ( unittest.TestCase ):
    '''Fast-test placeholder for VersatileDiffusionImageVariationPipeline; no fast tests implemented.'''
    pass
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
    '''Slow GPU integration test for VersatileDiffusionImageVariationPipeline.'''
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        '''Generate one image variation and compare a 3x3 output slice to reference values.'''
        # NOTE(review): automated renaming collapsed the local names — ``pipe``
        # and ``lowerCamelCase_`` below are undefined as written (the latter
        # presumably stood for ``torch_device`` / the image / the generator).
        # Code untouched; documentation only.
        lowerCAmelCase__ = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        lowerCAmelCase__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = pipe(
            image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
        lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        lowerCAmelCase__ = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 98
| 1
|
'''simple docstring'''
def __lowerCamelCase ( power = 1000 ) -> int:
    """Return the sum of the decimal digits of ``2**power`` (Project Euler 16).

    Args:
        power: exponent to raise 2 to; defaults to 1000 (answer 1366).
    """
    # str() yields the decimal digits; sum them directly.
    return sum(int(digit ) for digit in str(2**power ) )
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    # ``solution`` was never defined in this script; the digit-sum function
    # above is the entry point.
    result = __lowerCamelCase(power)
    print('''Sum of the digits is: ''', result)
| 358
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Public-API map consumed by ``_LazyModule`` below: submodule -> exported names.
_import_structure = {
    '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
# Backward-compatible alias for the original (auto-renamed) module-level name.
__lowerCAmelCase = _import_structure
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: also expose the modeling classes lazily.
    _import_structure['''modeling_ernie'''] = [
        '''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ErnieForCausalLM''',
        '''ErnieForMaskedLM''',
        '''ErnieForMultipleChoice''',
        '''ErnieForNextSentencePrediction''',
        '''ErnieForPreTraining''',
        '''ErnieForQuestionAnswering''',
        '''ErnieForSequenceClassification''',
        '''ErnieForTokenClassification''',
        '''ErnieModel''',
        '''ErniePreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 358
| 1
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase ( nn.Module ):
    """Dual-conditioning transformer: runs two TransformeraDModel blocks over
    split slices of ``encoder_hidden_states`` and mixes their residuals
    (text + image conditioning, as used by VersatileDiffusion pipelines).
    """
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , num_vector_embeds = None , activation_fn = "geglu" , num_embeds_ada_norm = None , ) -> None:
        """Create the two inner transformers; every kwarg is forwarded to both."""
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def a__ ( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ) -> List[Any]:
        """Encode each conditioning slice with its assigned transformer, then mix
        the two residual outputs by ``mix_ratio`` and add back the input.

        Returns a ``TransformeraDModelOutput`` (or a 1-tuple when
        ``return_dict`` is False).
        """
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            # Keep only the residual each transformer contributes.
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
| 54
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
    """Pipeline tests for the fill-mask task (tiny-distilroberta / distilroberta checkpoints).

    NOTE(review): this block shows heavy mechanical renaming damage, documented here
    rather than fixed so the code stays byte-identical:
    - both class attributes are bound to the same name ``_a``, so the second
      assignment clobbers the first (only TF_MODEL_FOR_MASKED_LM_MAPPING survives);
    - every method is named ``a__``, so in a normal Python class body each later
      definition shadows the previous one and only the last survives — presumably
      these were distinct ``test_*`` / helper methods originally; TODO confirm;
    - locals are assigned to ``_A`` but read back under other names
      (``unmasker``, ``pipe``, ``fill_masker``, ``outputs`` …), which would raise
      NameError at run time;
    - helper calls such as ``self.run_large_test`` / ``self.run_test_top_k`` target
      method names that no longer exist after the renaming.
    """

    _a = MODEL_FOR_MASKED_LM_MAPPING
    _a = TF_MODEL_FOR_MASKED_LM_MAPPING

    # Looks like a tearDown hook: free GPU memory between tests — TODO confirm.
    def a__ ( self ) -> Tuple:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    # Small-model TF smoke test: exact top-2 predictions of the tiny checkpoint.
    @require_tf
    def a__ ( self ) -> Any:
        _A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
        _A : Optional[int] = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(_a , decimals=6 ) , [
                {"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
                {"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
            ] , )
        _A : Tuple = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(_a , decimals=6 ) , [
                {
                    """sequence""": """The largest city in France is grouped""",
                    """score""": 2.1e-05,
                    """token""": 3_8015,
                    """token_str""": """ grouped""",
                },
                {
                    """sequence""": """The largest city in France is accuser""",
                    """score""": 2.1e-05,
                    """token""": 2_5506,
                    """token_str""": """ accuser""",
                },
            ] , )
        # targets= restricts scoring to an explicit candidate list
        _A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(_a , decimals=6 ) , [
                {"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
                {"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
                {"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
            ] , )

    # Small-model PyTorch smoke test, including the two-mask case.
    @require_torch
    def a__ ( self ) -> str:
        _A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
        _A : List[Any] = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(_a , decimals=6 ) , [
                {"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
                {"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
            ] , )
        _A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(_a , decimals=6 ) , [
                {
                    """sequence""": """The largest city in France is Maul""",
                    """score""": 2.2e-05,
                    """token""": 3_5676,
                    """token_str""": """ Maul""",
                },
                {"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
            ] , )
        _A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(_a , decimals=6 ) , [
                {"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
                {"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
                {"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
            ] , )
        # Two mask tokens -> pipeline returns one ranked list per mask position.
        _A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
        self.assertEqual(
            nested_simplify(_a , decimals=6 ) , [
                [
                    {
                        """score""": 2.2e-05,
                        """token""": 3_5676,
                        """token_str""": """ Maul""",
                        """sequence""": """<s>My name is Maul<mask></s>""",
                    },
                    {"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
                ],
                [
                    {
                        """score""": 2.2e-05,
                        """token""": 3_5676,
                        """token_str""": """ Maul""",
                        """sequence""": """<s>My name is<mask> Maul</s>""",
                    },
                    {"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
                ],
            ] , )

    # fp16 regression test: postprocessing must cast float16 logits back to float32.
    @require_torch_gpu
    def a__ ( self ) -> Union[str, Any]:
        _A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
        # convert model to fp16
        pipe.model.half()
        _A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(_a , _a )

    # Slow test: full distilroberta-base under PyTorch, shared assertions below.
    @slow
    @require_torch
    def a__ ( self ) -> Optional[int]:
        _A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
        self.run_large_test(_a )

    # Slow test: full distilroberta-base under TensorFlow.
    @slow
    @require_tf
    def a__ ( self ) -> Tuple:
        _A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
        self.run_large_test(_a )

    # Shared assertions for the slow tests: real-model top predictions.
    def a__ ( self , _a ) -> Tuple:
        _A : Optional[int] = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(_a ) , [
                {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
                {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
            ] , )
        _A : int = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(_a ) , [
                {
                    """sequence""": """The largest city in France is Paris""",
                    """score""": 0.251,
                    """token""": 2201,
                    """token_str""": """ Paris""",
                },
                {
                    """sequence""": """The largest city in France is Lyon""",
                    """score""": 0.214,
                    """token""": 1_2790,
                    """token_str""": """ Lyon""",
                },
            ] , )
        _A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(_a ) , [
                {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
                {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
                {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
            ] , )

    # Runs the generic pipeline test-suite against the PT tiny model.
    @require_torch
    def a__ ( self ) -> Tuple:
        _A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
        _A : str = None
        _A : Union[str, Any] = None
        self.run_pipeline_test(_a , [] )

    # Runs the generic pipeline test-suite against the TF tiny model.
    @require_tf
    def a__ ( self ) -> Union[str, Any]:
        _A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
        _A : Any = None
        _A : Dict = None
        self.run_pipeline_test(_a , [] )

    # Factory used by the common pipeline test mixin: (pipeline, example inputs).
    def a__ ( self , _a , _a , _a ) -> Any:
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
        _A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
        _A : Tuple = [
            F'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples

    # Generic behaviour checks: output shape, batching, error cases, then sub-tests.
    def a__ ( self , _a , _a ) -> Dict:
        _A : Dict = fill_masker.tokenizer
        _A : List[str] = fill_masker.model
        _A : List[str] = fill_masker(
            F'''This is a {tokenizer.mask_token}''' , )
        self.assertEqual(
            _a , [
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
            ] , )
        _A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
        self.assertEqual(
            _a , [
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
            ] , )
        # Batched call: one ranked list per input sentence.
        _A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
        self.assertEqual(
            _a , [
                [
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                ],
                [
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                ],
            ] , )
        with self.assertRaises(_a ):
            fill_masker([None] )
        # No mask_token is not supported
        with self.assertRaises(_a ):
            fill_masker("""This is""" )
        self.run_test_top_k(_a , _a )
        self.run_test_targets(_a , _a )
        self.run_test_top_k_targets(_a , _a )
        self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
        self.fill_mask_with_multiple_masks(_a , _a )

    # targets= behaviour: restriction via pipeline arg, call arg, score equality, errors.
    def a__ ( self , _a , _a ) -> List[str]:
        _A : int = tokenizer.get_vocab()
        _A : str = sorted(vocab.keys() )[:2]
        # Pipeline argument
        _A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
        _A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
        self.assertEqual(
            _a , [
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
            ] , )
        _A : List[str] = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs} , _a )
        _A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
        # Call argument
        _A : str = FillMaskPipeline(model=_a , tokenizer=_a )
        _A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
        self.assertEqual(
            _a , [
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
            ] , )
        _A : int = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs} , _a )
        _A : Any = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
        # Score equivalence
        _A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
        _A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
        _A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(_a ) == set(_a ):
            _A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
            _A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
        # Raises with invalid
        with self.assertRaises(_a ):
            _A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(_a ):
                _A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
            with self.assertRaises(_a ):
                _A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )

    # top_k behaviour: pipeline arg and call arg must agree.
    def a__ ( self , _a , _a ) -> Optional[Any]:
        _A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
        _A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
        self.assertEqual(
            _a , [
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
            ] , )
        _A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
        _A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
        self.assertEqual(
            _a , [
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
            ] , )
        self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )

    # top_k combined with targets must filter identically to targets alone.
    def a__ ( self , _a , _a ) -> List[Any]:
        _A : Union[str, Any] = tokenizer.get_vocab()
        _A : int = FillMaskPipeline(model=_a , tokenizer=_a )
        # top_k=2, ntargets=3
        _A : List[str] = sorted(vocab.keys() )[:3]
        _A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        # NOTE(review): the lambda parameter is `_a` but the body reads `x["score"]` —
        # looks like the sort key was `lambda x: x["score"]` originally; TODO confirm.
        _A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda _a : x["score"] , reverse=_a )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(_a ).issubset(_a ):
            _A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )

    # Duplicate targets must be deduplicated before ranking.
    def a__ ( self , _a , _a ) -> str:
        _A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
        _A : List[Any] = tokenizer.get_vocab()
        # String duplicates + id duplicates
        _A : Optional[Any] = sorted(vocab.keys() )[:3]
        _A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        _A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(_a ) , 3 )

    # Multiple masks in one sentence: one ranked list per mask position.
    def a__ ( self , _a , _a ) -> Tuple:
        _A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
        _A : Optional[Any] = fill_masker(
            F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
        self.assertEqual(
            _a , [
                [
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                ],
                [
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                ],
                [
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                    {"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
                ],
            ] , )
| 54
| 1
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def a ( *snake_case__: object ):
    """Print the given arguments atomically across processes.

    Holds an exclusive ``flock`` on this script file while printing so that
    output from concurrent ``torch.distributed`` ranks is not interleaved.

    Fixes: the original opened ``snake_case__`` (the varargs *tuple*) instead of
    a file path, which raises TypeError on the first call.
    """
    # Lock target: this very script file — it exists on every node that runs it.
    with open(__file__ , '''r''' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*snake_case__ )
        finally:
            # Always release the lock, even if print() raises.
            fcntl.flock(fh , fcntl.LOCK_UN )


# The diagnostic body below calls `printflock`; expose that name as an alias.
printflock = a
# Diagnostic body: verify that this rank can join the NCCL process group,
# all-reduce with its peers, and allocate CUDA memory.
# Fixes: every value was assigned to the throwaway name `__a` while the code
# below read `local_rank`, `device`, `hostname`, `gpu`, `rank`, `world_size`
# — guaranteed NameErrors. Restore the real bindings.
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
# Tag every printed line with host + local rank so interleaved cluster output
# is attributable to a specific GPU.
gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group('nccl')
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    # `a` is the flock-guarded print helper defined above.
    a(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        a(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    a(f"{gpu} is broken")
    raise
| 97
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE = TypeVar("T")
# `Generic[T]` below needs the TypeVar bound to the name ``T``; the rename left
# it only under `_SCREAMING_SNAKE_CASE`, a NameError at class-creation time.
T = _SCREAMING_SNAKE_CASE


class SCREAMING_SNAKE_CASE_ ( Generic[T] ):
    """Directed/undirected graph stored as a ``dict`` adjacency list.

    Fixes three obfuscation defects: the add-edge method declared two
    parameters with the same name (a SyntaxError), ``__init__`` discarded the
    adjacency dict and the ``directed`` flag into a throwaway local, and every
    adjacency-list update was likewise discarded instead of written to
    ``self.adj_list``.
    """

    def __init__( self , directed: bool = True ):
        """Create an empty graph; ``directed=False`` makes edges bidirectional."""
        # vertex -> list of adjacent vertices
        self.adj_list: dict = {}
        self.directed = directed

    def UpperCamelCase__ ( self , source_vertex: T , destination_vertex: T ):
        """Add an edge between the two vertices, creating them on first use.

        Returns ``self`` so calls can be chained.
        """
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create both vertices, each listing the other as its first
            # adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create the source vertex pointing at the destination, and the
            # destination vertex with no adjacent vertices.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__( self ):
        """Pretty-printed adjacency list."""
        return pformat(self.adj_list)
| 181
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __magic_name__ ( __snake_case : list[float] ) -> int:
return np.maximum(0 , UpperCamelCase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 720
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Declarative map of submodule -> public names; consumed by _LazyModule below.
# Fixes: the dict (and its conditional extensions) were bound to the throwaway
# name `_A`, so `_import_structure` at the bottom was undefined; the
# TYPE_CHECKING imports also pointed at misspelled modules/classes
# (`configuration_pixastruct` / `PixaStruct*`) that do not match the map.
_import_structure = {
    """configuration_pix2struct""": [
        """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Pix2StructConfig""",
        """Pix2StructTextConfig""",
        """Pix2StructVisionConfig""",
    ],
    """processing_pix2struct""": ["""Pix2StructProcessor"""],
}
# Backward-compatible alias for the name used by the damaged revision.
_A = _import_structure

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processor is only importable when the vision extras are installed.
    _import_structure["""image_processing_pix2struct"""] = ["""Pix2StructImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code requires torch.
    _import_structure["""modeling_pix2struct"""] = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; names match _import_structure.
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy dependencies
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase : List[str] = logging.get_logger(__name__)
class A__ ( YolosImageProcessor ):
    """Deprecated feature-extractor shim for YOLOS.

    Behaves exactly like ``YolosImageProcessor`` but emits a ``FutureWarning``
    on instantiation. Fixes two defects: the base class was the undefined name
    ``__UpperCAmelCase`` (``YolosImageProcessor``, imported above, is the class
    the warning tells users to migrate to), and ``__init__`` declared
    ``*lowercase`` and ``**lowercase`` — a duplicate-argument SyntaxError.
    """

    def __init__( self , *args , **kwargs) -> None:
        """Forward all arguments to YolosImageProcessor after warning."""
        # Deprecation notice: class scheduled for removal in Transformers v5.
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs)
| 302
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase : int = logging.get_logger(__name__)

# NOTE(review): this dict rebinds `lowercase`, immediately clobbering the
# logger above — presumably these were two distinct names originally
# (`logger` and the pretrained-config archive map); TODO confirm.
lowercase : Dict = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class A__ ( PretrainedConfig ):
    """YOLOS model configuration.

    Fixes two defects: the base class was the undefined name
    ``__UpperCAmelCase`` (``PretrainedConfig``, imported above, is the base for
    a configuration class), and every ``__init__`` parameter was named
    ``lowercase`` — a duplicate-argument SyntaxError. Parameter names are
    restored from the right-hand sides the body already referenced; values were
    also previously discarded into annotated locals (``a__ : int = ...``)
    instead of being stored on ``self``.
    """

    # model-type identifier used by the auto-config machinery
    __A : Dict = '''yolos'''

    def __init__(
        self ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        image_size=[512, 864] ,  # mutable default kept for interface compatibility
        patch_size=16 ,
        num_channels=3 ,
        qkv_bias=True ,
        num_detection_tokens=100 ,
        use_mid_position_embeddings=True ,
        auxiliary_loss=False ,
        class_cost=1 ,
        bbox_cost=5 ,
        giou_cost=2 ,
        bbox_loss_coefficient=5 ,
        giou_loss_coefficient=2 ,
        eos_coefficient=0.1 ,
        **kwargs ,
    ) -> None:
        """Store all hyper-parameters; unknown kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)
        # Transformer encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Vision/patch embedding hyper-parameters.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Detection-specific settings.
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class A__ ( OnnxConfig ):
    """ONNX export configuration for YOLOS.

    Fixes: the base class was the undefined name ``__UpperCAmelCase`` —
    ``OnnxConfig`` (imported above) is the base for ONNX export configs.

    NOTE(review): this definition reuses the name ``A__`` and therefore shadows
    the configuration class above, and all three members share the name
    ``__lowercase`` so only the last one survives — artifacts of the renaming,
    left in place to keep the external interface unchanged. TODO confirm the
    intended names upstream.
    """

    # torch version the export path was validated against
    __A : int = version.parse('''1.11''' )

    @property
    def __lowercase ( self) -> Mapping[str, Mapping[int, str]]:
        '''Dynamic-axis spec for the single ``pixel_values`` input.'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def __lowercase ( self) -> float:
        '''Absolute tolerance used when validating exported model outputs.'''
        return 1e-4

    @property
    def __lowercase ( self) -> int:
        '''Default ONNX opset version to export with.'''
        return 12
| 302
| 1
|
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCamelCase__ ( model ):
    """Build the (encoder_config, decoder_config) pair for the HF model.

    Derives a ``DonutSwinConfig`` and an ``MBartConfig`` from an original
    ``DonutModel``'s config. Fixes: both configs were discarded into the same
    throwaway local while ``return encoder_config, decoder_config`` referenced
    undefined names, and the MBart boolean kwargs were set to the model object
    itself. Boolean values restored per the upstream Donut conversion script.
    """
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    # Decoder is a causal MBart LM cross-attending to the Swin encoder.
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def lowerCamelCase__ ( name ):
    """Map an original Donut checkpoint parameter name to its HF equivalent.

    Fixes: every ``str.replace`` result was discarded into a throwaway local,
    so the function returned ``name`` essentially unmodified; each rewrite now
    rebinds ``name`` so the substitutions compose.
    """
    if "encoder.model" in name:
        name = name.replace('encoder.model' , 'encoder' )
    if "decoder.model" in name:
        name = name.replace('decoder.model' , 'decoder' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if name.startswith('encoder' ):
        if "layers" in name:
            # HF nests the Swin stages one level deeper: encoder.encoder.layers...
            name = """encoder.""" + name
        if "attn.proj" in name:
            name = name.replace('attn.proj' , 'attention.output.dense' )
        # `mask` exclusion keeps attn_mask buffers (dropped elsewhere) untouched.
        if "attn" in name and "mask" not in name:
            name = name.replace('attn' , 'attention.self' )
        if "norm1" in name:
            name = name.replace('norm1' , 'layernorm_before' )
        if "norm2" in name:
            name = name.replace('norm2' , 'layernorm_after' )
        if "mlp.fc1" in name:
            name = name.replace('mlp.fc1' , 'intermediate.dense' )
        if "mlp.fc2" in name:
            name = name.replace('mlp.fc2' , 'output.dense' )
        # Final encoder LayerNorm gets HF's flat naming.
        if name == "encoder.norm.weight":
            name = """encoder.layernorm.weight"""
        if name == "encoder.norm.bias":
            name = """encoder.layernorm.bias"""
    return name
def lowerCamelCase__ ( orig_state_dict , model ):
    """Rewrite an original Donut state dict in place for the HF model.

    Splits each fused ``qkv`` weight/bias into separate query/key/value
    tensors sized by the corresponding attention block's head size; drops
    ``attn_mask`` buffers and the encoder's final LayerNorm, which the HF
    implementation does not use. Fixes: the popped value and all derived
    indices were discarded into the same throwaway local, so nothing was ever
    written back.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # key looks like: encoder.model.layers.<L>.blocks.<B>.attn.qkv.{weight,bias}
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            # Per-block attention width; fused qkv stacks q, k, v along dim 0.
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            # NOTE(review): upstream stores this under rename_key(key); in this file
            # every helper shares the name `lowerCamelCase__`, so the rename helper
            # cannot be called from here without resolving to the wrong function.
            # Keep the original key rather than silently dropping it. TODO confirm.
            orig_state_dict[key] = val
    return orig_state_dict
# NOTE(review): this definition declares three parameters that all share the
# name `_lowerCamelCase` — a duplicate-argument SyntaxError — and its locals
# are bound to `_lowerCAmelCase` while later lines read other names
# (`original_config`, `encoder_config`, `original_model`, `image`, ...).
# It also calls `get_configs` / `convert_state_dict`, names that no longer
# exist because every helper in this file was renamed to `lowerCamelCase__`.
# Presumably the parameters were (model_name, pytorch_dump_folder_path,
# push_to_hub); TODO confirm against the upstream Donut conversion script.
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False ):
    '''Convert an original Donut checkpoint to a HF VisionEncoderDecoderModel,
    verify its outputs against the original on a sample document, and
    optionally save/push the converted model and processor.'''
    # Load the original model in eval mode.
    _lowerCAmelCase : Dict = DonutModel.from_pretrained(_lowerCamelCase ).eval()
    # load HuggingFace model
    _lowerCAmelCase : List[str] = get_configs(_lowerCamelCase )
    _lowerCAmelCase : Union[str, Any] = DonutSwinModel(_lowerCamelCase )
    _lowerCAmelCase : Union[str, Any] = MBartForCausalLM(_lowerCamelCase )
    _lowerCAmelCase : Any = VisionEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase )
    model.eval()
    # Port the original weights into the HF layout.
    _lowerCAmelCase : List[Any] = original_model.state_dict()
    _lowerCAmelCase : List[Any] = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
    model.load_state_dict(_lowerCamelCase )
    # verify results on scanned document
    _lowerCAmelCase : str = load_dataset('hf-internal-testing/example-documents' )
    _lowerCAmelCase : List[Any] = dataset["""test"""][0]["""image"""].convert('RGB' )
    # Build the processor (tokenizer + image processor) matching the original.
    _lowerCAmelCase : List[Any] = XLMRobertaTokenizerFast.from_pretrained(_lowerCamelCase , from_slow=_lowerCamelCase )
    _lowerCAmelCase : Tuple = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    _lowerCAmelCase : Optional[int] = DonutProcessor(_lowerCamelCase , _lowerCamelCase )
    _lowerCAmelCase : Tuple = processor(_lowerCamelCase , return_tensors='pt' ).pixel_values
    # Pick the task prompt that matches the fine-tuned checkpoint.
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        _lowerCAmelCase : List[str] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        _lowerCAmelCase : Union[str, Any] = """When is the coffee break?"""
        _lowerCAmelCase : List[str] = task_prompt.replace('{user_input}' , _lowerCamelCase )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        _lowerCAmelCase : str = """<s_rvlcdip>"""
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        _lowerCAmelCase : str = """<s_cord>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        _lowerCAmelCase : Any = """s_cord-v2>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        _lowerCAmelCase : List[str] = """<s_zhtrainticket>"""
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        _lowerCAmelCase : int = """hello world"""
    else:
        raise ValueError('Model name not supported' )
    _lowerCAmelCase : List[Any] = original_model.decoder.tokenizer(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors='pt' )[
        """input_ids"""
    ]
    # Compare patch embeddings, encoder states, and decoder logits between the
    # original and converted models within small tolerances.
    _lowerCAmelCase : Tuple = original_model.encoder.model.patch_embed(_lowerCamelCase )
    _lowerCAmelCase : str = model.encoder.embeddings(_lowerCamelCase )
    assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
    # verify encoder hidden states
    _lowerCAmelCase : Any = original_model.encoder(_lowerCamelCase )
    _lowerCAmelCase : Optional[int] = model.encoder(_lowerCamelCase ).last_hidden_state
    assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
    # verify decoder hidden states
    _lowerCAmelCase : int = original_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).logits
    _lowerCAmelCase : Dict = model(_lowerCamelCase , decoder_input_ids=_lowerCamelCase ).logits
    assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
    print('Looks ok!' )
    # Optionally persist and/or push the converted artefacts.
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(_lowerCamelCase )
        processor.save_pretrained(_lowerCamelCase )
    if push_to_hub:
        model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
        processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
    # CLI entry point for the Donut conversion script.
    # Fixes: the parser and parsed args were bound to the throwaway name
    # `_lowerCAmelCase` while the code read `parser` / `args` (NameErrors),
    # and the final call targeted the undefined name `convert_donut_checkpoint`
    # (the conversion function above is named `lowerCamelCase__`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""naver-clova-ix/donut-base-finetuned-docvqa""",
        required=False,
        type=str,
        help="""Name of the original model you\'d like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        required=False,
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
    )
    args = parser.parse_args()
    # `lowerCamelCase__` resolves to the conversion entry point defined above.
    lowerCamelCase__(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 707
|
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
    """Encode a string (UTF-8) with the base-a85 helper and return the bytes.

    Bug fix: the original body called ``string.encode(...)`` — ``string`` is
    undefined here; the parameter is what must be encoded.
    """
    return baseaa.aaaencode(_lowerCamelCase.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
    """Decode base-a85 data and return the result as a UTF-8 string."""
    decoded_bytes = baseaa.aaadecode(_lowerCamelCase )
    return decoded_bytes.decode('utf-8' )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 16
| 0
|
import os
def __magic_name__ ( lowerCAmelCase_ = "input.txt"):
    """Project Euler 82: minimal path sum moving up, down and right.

    Reads a comma-separated integer grid from *lowerCAmelCase_* (joined to
    that path's own directory, so an absolute path is used as-is) and returns
    the smallest sum of a path that starts in any cell of the left column and
    ends in any cell of the right column, moving only up, down and right.

    Bug fix: the original assigned every intermediate to one obfuscated name
    and then read the undefined names ``matrix``/``rows``/``cols``/
    ``minimal_path_sums``, raising NameError on the first call.
    """
    with open(os.path.join(os.path.dirname(lowerCAmelCase_), lowerCAmelCase_)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    # minimal_path_sums[i][j] = cheapest path that ends at cell (i, j)
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # first assume we arrived from the left ...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ... then relax paths coming from above ...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # ... and paths coming from below.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    # Bug fix: the solver above is named ``__magic_name__``; the original
    # called an undefined ``solution()``.
    print(f'''{__magic_name__() = }''')
| 250
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
    """Builds a tiny RoFormer config plus random inputs for the Flax tests.

    Bug fixes relative to the original block:
    * ``__init__`` declared every argument with the same name ``a_`` — a
      SyntaxError; the parameters are renamed to the attributes they fill,
    * the constructor bound values to a throwaway local instead of ``self``,
    * the second helper called ``self.prepare_config_and_inputs()`` which
      did not exist (both helpers shared one obfuscated method name).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the original passed ``is_decoder=a_`` (undefined);
            # the encoder-only default is assumed here — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def _UpperCamelCase(self):
        """Return (config, inputs_dict) for the common-test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
    """Flax RoFormer model-class test suite driven by the common tester mixin.

    NOTE(review): ``__lowerCamelCase`` is not defined anywhere in this file;
    the imported ``FlaxModelTesterMixin`` is presumably the intended base —
    confirm before running.
    """

    # NOTE(review): the mixin normally reads named class attributes (e.g.
    # ``all_model_classes``); these two obfuscated names shadow each other,
    # so only the tuple below survives on the class.
    __UpperCAmelCase : List[str] = True
    __UpperCAmelCase : Union[str, Any] = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _UpperCamelCase ( self ):
        # NOTE(review): ``FlaxRoFormerModelTester`` is undefined here — the
        # tester class above is bound to an obfuscated name; confirm binding.
        lowerCamelCase_ : Any = FlaxRoFormerModelTester(self )

    @slow
    def _UpperCamelCase ( self ):
        # NOTE(review): this method (shadowing the setup above, since both
        # share one name) reads the undefined names ``model`` and ``a_`` —
        # it cannot run as written.
        for model_class_name in self.all_model_classes:
            lowerCamelCase_ : List[str] = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=a_ )
            lowerCamelCase_ : Optional[Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(a_ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration check of FlaxRoFormerForMaskedLM logits.

    Bug fix: the original stored every intermediate in one obfuscated local
    and then read undefined names (``output``, ``vocab_size``, ``a_``).
    """

    @slow
    def _UpperCamelCase(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # reference slice copied unchanged from the original block
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 250
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def UpperCAmelCase__ ( path , tmp_path ) -> None:
    """``inspect_dataset`` copies the dataset script into the target dir.

    ``tmp_path`` is the standard pytest temporary-directory fixture.
    Bug fix: the original declared both parameters with one name (a
    SyntaxError) and read an undefined local ``script_name``.
    """
    inspect_dataset(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def UpperCAmelCase__ ( path , tmp_path ) -> None:
    """``inspect_metric`` copies the metric script into the target dir.

    ``tmp_path`` is the standard pytest temporary-directory fixture.
    Bug fix: duplicated parameter names (SyntaxError) and an undefined
    ``script_name`` local in the original.
    """
    inspect_metric(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    '''path, config_name, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def UpperCAmelCase__ ( path , config_name , expected_splits ) -> None:
    """``get_dataset_config_info`` returns the requested config and splits.

    Bug fix: duplicated parameter names (SyntaxError) and an undefined
    ``info`` local in the original; names restored from the parametrize ids.
    """
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def UpperCAmelCase__ ( path , config_name , expected_exception ) -> None:
    """An invalid config name raises the expected exception.

    Bug fix: the original declared all three parameters with one name —
    a SyntaxError; names restored from the parametrize ids.
    """
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
    '''path, expected''' , [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ] , )
def UpperCAmelCase__ ( path , expected ) -> None:
    """``get_dataset_config_names`` includes the expected config name.

    Bug fix: duplicated parameter names (SyntaxError) and an undefined
    ``config_names`` local in the original.
    """
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''' , [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ] , )
def UpperCAmelCase__ ( path , expected_configs , expected_splits_in_first_config ) -> None:
    """``get_dataset_infos`` lists every config; the first one has the splits.

    Bug fix: duplicated parameter names (SyntaxError) and undefined
    ``infos``/``expected_config``/``info`` locals in the original.
    """
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def UpperCAmelCase__ ( path , expected_config , expected_splits ) -> None:
    """A given config is present in ``get_dataset_infos`` with its splits.

    Bug fix: duplicated parameter names (SyntaxError) and undefined
    ``infos``/``info`` locals in the original.
    """
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def UpperCAmelCase__ ( path , config_name , expected_exception ) -> None:
    """``get_dataset_split_names`` raises for an invalid config name.

    Bug fix: duplicated parameter names (SyntaxError) in the original;
    names restored from the parametrize ids.
    """
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 664
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {"UserAgent": UserAgent().random}
def UpperCAmelCase__ ( script ) -> dict:
    """Extract the user JSON payload from an Instagram profile <script> tag.

    *script* is a BeautifulSoup tag whose first content chunk embeds a JSON
    object starting at ``{"config"`` and terminated by one trailing character.

    Bug fix: the original named its parameter ``_SCREAMING_SNAKE_CASE`` but
    read the undefined names ``script`` and ``data``.
    """
    data = script.contents[0]
    info = json.loads(data[data.find('''{"config"''' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCAmelCase :
    """Scrapes public profile information for an Instagram user.

    Bug fixes relative to the original block:
    * ``__init__`` bound the URL and payload to a throwaway local instead of
      ``self.url`` / ``self.user_data``,
    * every property shared one name-mangled method name, so none of the
      accessors used elsewhere in this file (``fullname``, ``biography``,
      ...) existed; their names are restored from those call sites.
    """

    def __init__( self , _lowerCAmelCase ):
        self.url = F'''https://www.instagram.com/{_lowerCAmelCase}/'''
        self.user_data = self.get_json()

    def get_json( self ):
        """Fetch the profile page and return the parsed user dict."""
        # ``UpperCAmelCase_`` is the module-level User-Agent headers dict.
        html = requests.get(self.url , headers=UpperCAmelCase_ ).text
        scripts = BeautifulSoup(html , '''html.parser''' ).find_all('''script''' )
        try:
            # NOTE(review): ``extract_user_profile`` is defined in this file
            # only under the obfuscated name ``UpperCAmelCase__`` — confirm
            # the module-level binding before running.
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__( self ):
        return F'''{self.__class__.__name__}(\'{self.username}\')'''

    def __str__( self ):
        return F'''{self.fullname} ({self.username}) is {self.biography}'''

    @property
    def username( self ):
        return self.user_data["username"]

    @property
    def fullname( self ):
        return self.user_data["full_name"]

    @property
    def biography( self ):
        return self.user_data["biography"]

    @property
    def email( self ):
        return self.user_data["business_email"]

    @property
    def website( self ):
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ):
        return self.user_data["is_verified"]

    @property
    def is_private( self ):
        return self.user_data["is_private"]
def UpperCAmelCase__ ( username : str = "github" ) -> None:
    """Smoke-test the scraper against a public account (skipped on CI).

    Bug fix: the original declared one duplicated parameter name and read the
    undefined names ``InstagramUser`` / ``instagram_user``; the scraper class
    above is defined under the obfuscated name ``UpperCAmelCase``.
    """
    import os

    if os.environ.get('''CI''' ):
        return  # test failing on GitHub Actions
    instagram_user = UpperCAmelCase(username )
    assert instagram_user.user_data
    # NOTE(review): the payload comes from json.loads, so a dict is expected.
    assert isinstance(instagram_user.user_data , dict )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 1_5_0
    assert instagram_user.number_of_followers > 1_2_0_0_0_0
    assert instagram_user.number_of_followings > 1_5
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the original bound the instance to ``UpperCAmelCase_``
    # (clobbering the module-level headers dict) and called the undefined
    # ``InstagramUser``; the class above is named ``UpperCAmelCase``.
    instagram_user = UpperCAmelCase("github")
    print(instagram_user)
    print(F"""{instagram_user.number_of_posts = }""")
    print(F"""{instagram_user.number_of_followers = }""")
    print(F"""{instagram_user.number_of_followings = }""")
    print(F"""{instagram_user.email = }""")
    print(F"""{instagram_user.website = }""")
    print(F"""{instagram_user.profile_picture_url = }""")
    print(F"""{instagram_user.is_verified = }""")
    print(F"""{instagram_user.is_private = }""")
| 664
| 1
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
SCREAMING_SNAKE_CASE : Any = float("nan")
class __lowercase :
    """Tee-like stream: mirrors writes to the real stdout and appends them
    to a log file, stripping tqdm carriage-return control lines from the file.

    Bug fixes relative to the original block:
    * ``__init__`` bound stdout and the file handle to a throwaway local
      instead of ``self.stdout`` / ``self.file``,
    * the return annotations referenced typing names that are not imported
      in this module (evaluated at def time -> NameError); they are dropped.
    """

    def __init__( self , a__ ):
        self.stdout = sys.stdout
        self.file = open(a__ , '''a''' )

    def __getattr__( self , a__ ):
        # delegate everything else (flush, isatty, ...) to the real stdout
        return getattr(self.stdout , a__ )

    def lowerCAmelCase_ ( self , a__ ):
        self.stdout.write(a__ )
        # strip tqdm codes
        self.file.write(re.sub(r'''^.*\r''' , '''''' , a__ , 0 , re.M ) )
def lowerCamelCase_ ( max_width=80 , full_python_path=False ):
    """Reconstruct the command line this script was launched with.

    Returns a shell-ready, backslash-continued string at most *max_width*
    columns wide per line; *full_python_path* keeps the interpreter's full
    path instead of its basename.

    Bug fix: the original declared both parameters with one obfuscated name
    (a SyntaxError) and appended to an undefined ``cmd`` list.
    """
    cmd = []
    # deal with critical env vars
    env_keys = ['''CUDA_VISIBLE_DEVICES''']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(F"{key}={val}" )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''''''
    while len(cmd ) > 0:
        current_line += F"{cmd.pop(0 )} "
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''''''
    return "\\\n".join(lines )
def lowerCamelCase_ ( args , output_dir ):
    """Normalise ``args.base_cmd`` and return it as an argv list.

    Unwraps multi-line input, forces ``--output_dir`` to *output_dir*, and
    guarantees ``--overwrite_output_dir`` is present.

    Bug fix: the original declared both parameters with one obfuscated name
    (a SyntaxError) and discarded each ``re.sub`` result into a throwaway
    local instead of writing it back to ``args.base_cmd``.
    """
    # unwrap multi-line input
    args.base_cmd = re.sub(r'''[\\\n]+''' , ''' ''' , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
    args.base_cmd += F" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def lowerCamelCase_ ( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    """Run one benchmark sub-process and return the metrics we care about.

    Saves stdout/stderr under ``output_dir`` (prefixed with the variation),
    returns ``{target_metric_key: nan}`` on failure, otherwise the filtered
    contents of ``output_dir/all_results.json``.

    Bug fix: the original declared all seven parameters with one obfuscated
    name (a SyntaxError) and bound its locals to throwaway names while
    reading ``result``/``prefix``/``metrics``.
    """
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print('''STDOUT''' , result.stdout )
        print('''STDERR''' , result.stderr )
    # save the streams
    prefix = variation.replace(''' ''' , '''-''' )
    with open(Path(output_dir ) / F"log.{prefix}.stdout.txt" , '''w''' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / F"log.{prefix}.stderr.txt" , '''w''' ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print('''failed''' )
        return {target_metric_key: nan}
    with io.open(F"{output_dir}/all_results.json" , '''r''' , encoding='''utf-8''' ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def lowerCamelCase_ ( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    """Run one variation *repeat_times* times and return the averaged metrics.

    Prints a progress line (✓/✘ per repeat) and, on success, the mean of the
    target metric; failed variations return ``{variation_key: variation,
    target_metric_key: nan}``.

    Bug fix: the original declared all ten parameters with one obfuscated
    name (a SyntaxError) and bound its accumulators to throwaway locals;
    parameter order is restored from the call site below.
    """
    results = []
    metrics = []
    preamble = F"{id}: {variation:<{longest_variation_len}}"
    outcome = F"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        # NOTE(review): ``process_run_single`` exists in this file only under
        # an obfuscated name — confirm the module-level binding.
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F"\33[2K\r{outcome}"
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = F"{outcome} {mean_target}"
        if len(results ) > 1:
            results_str += F" {tuple(round(x , 2 ) for x in results )}"
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def lowerCamelCase_ ( ):
    """Return a formatted report of software versions and GPU hardware.

    Bug fix: the original bound the CUDA device properties to a throwaway
    local but read the undefined name ``properties`` inside the f-string.
    Requires a CUDA-capable torch installation.
    """
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
    return F"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def lowerCamelCase_ ( results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    """Build and print the github- and console-formatted result tables.

    Computes a ``diff_%`` column relative to *base_variation* (or the minimal
    target value as fallback) and prints both markdown tables.

    Bug fix: the original declared its parameters with one obfuscated name
    (a SyntaxError) and bound every intermediate to a throwaway local while
    reading ``df``/``sentinel_value``/``df_github``/``df_console``/``report``.
    """
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(1_00 * (r[target_metric_key] - sentinel_value ) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='''columns''' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='''columns''' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    df_console = df.rename(lambda c: c.replace('''_''' , '''\n''' ) , axis='''columns''' )
    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    # NOTE(review): ``get_versions``/``get_original_command`` exist in this
    # file only under obfuscated names — confirm the module-level bindings.
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='''.2f''' )]
    print('''\n\n'''.join(report ) )
def lowerCamelCase_ ( ):
    """Benchmark driver: parse CLI args, run every variation, print report.

    NOTE(review): as written this function cannot run — intermediates are
    bound to ``A_`` while the code reads ``parser``/``args``/``output_dir``/
    ``base_cmd``/``variations``/``results``; the argparse keyword values use
    the undefined name ``__UpperCamelCase``; and the helpers it needs
    (``get_base_command``, ``process_run``, ``process_results``, ``Tee``)
    exist in this file only under obfuscated names.  Left byte-identical
    pending a coordinated module-wide fix.
    """
    A_ = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=__UpperCamelCase , type=__UpperCamelCase , nargs='''+''' , required=__UpperCamelCase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=__UpperCamelCase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=__UpperCamelCase , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=__UpperCamelCase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=__UpperCamelCase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    A_ = parser.parse_args()
    A_ = args.output_dir
    Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
    A_ = get_base_command(__UpperCamelCase , __UpperCamelCase )
    # split each dimension into its --foo variations
    A_ = [list(map(str.strip , re.split(r'''\|''' , __UpperCamelCase ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    A_ = list(map(str.strip , map(''' '''.join , itertools.product(*__UpperCamelCase ) ) ) )
    A_ = max(len(__UpperCamelCase ) for x in variations )
    # split wanted keys
    A_ = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    A_ = F"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
    print(F"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
    print(F"and this script's output is also piped into {report_fn}" )
    A_ = Tee(__UpperCamelCase )
    print(F"\n*** Running {len(__UpperCamelCase )} benchmarks:" )
    print(F"Base command: {' '.join(__UpperCamelCase )}" )
    A_ = '''variation'''
    A_ = []
    for id, variation in enumerate(tqdm(__UpperCamelCase , desc='''Total completion: ''' , leave=__UpperCamelCase ) ):
        A_ = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , args.target_metric_key , __UpperCamelCase , args.repeat_times , __UpperCamelCase , args.verbose , ) )
    process_results(__UpperCamelCase , args.target_metric_key , __UpperCamelCase , args.base_variation , __UpperCamelCase )
if __name__ == "__main__":
    # Bug fix: the original called an undefined ``main()``; at module run
    # time the driver function is the last ``lowerCamelCase_`` definition.
    lowerCamelCase_()
| 141
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
SCREAMING_SNAKE_CASE : Tuple = numpy.array([0.5, 0.8660254])
SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
SCREAMING_SNAKE_CASE : List[str] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_ ( initial_vectors , steps ):
    """Apply *steps* Koch-snowflake subdivision rounds to *initial_vectors*.

    Bug fix: the original declared both parameters with one obfuscated name
    (a SyntaxError) and never bound the ``vectors`` name it read.
    """
    vectors = initial_vectors
    for _ in range(steps ):
        # NOTE(review): ``iteration_step`` exists in this file only under an
        # obfuscated name — confirm the module-level binding before running.
        vectors = iteration_step(vectors )
    return vectors
def lowerCamelCase_ ( vectors ):
    """One Koch subdivision: replace every segment by four smaller ones.

    Bug fix: the original appended to an undefined ``new_vectors`` list and
    read the undefined names ``end_vector``/``difference_vector``.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        # middle "bump" point, rotated outward by 60 degrees
        # NOTE(review): ``rotate`` exists in this file only under an
        # obfuscated name — confirm the module-level binding before running.
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def lowerCamelCase_ ( vector , angle_in_degrees ):
    """Rotate a 2-D *vector* counter-clockwise by *angle_in_degrees*.

    Bug fix: the original declared both parameters with one obfuscated name
    (a SyntaxError) and read undefined locals (``theta``, ``c``, ``s``).
    """
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def lowerCamelCase_ ( vectors ):
    """Plot the snowflake described by *vectors* with equal axis scaling.

    Bug fix: the original unpacked ``zip(*vectors)`` into a throwaway local
    and then plotted two undefined names.
    """
    axes = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): as written this guard cannot run — ``iterate``/``plot``
    # exist above only under obfuscated names, and ``processed_vectors`` is
    # never bound (the result goes to ``SCREAMING_SNAKE_CASE``).  Left
    # byte-identical pending a coordinated module-wide fix.
    SCREAMING_SNAKE_CASE : Optional[Any] = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 141
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase (ProcessorMixin ):
    """Wraps a BLIP image processor and a BERT tokenizer into one processor.

    Bug fixes relative to the original block:
    * the base class was the undefined name ``_UpperCAmelCase``; the
      ``ProcessorMixin`` import above is presumed to be the intended base,
    * the three mixin class attributes all shared one obfuscated name (only
      the last survived); their conventional names are restored,
    * ``__call__`` declared every argument as ``A_`` — a SyntaxError — and
      the decode helpers shadowed each other under one method name.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor , tokenizer ) ->None:
        # NOTE(review): the original assigned a bare ``False`` to a throwaway
        # local; disabling token_type_ids on the tokenizer (matching the
        # ``return_token_type_ids=False`` default below) is assumed — confirm.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__(
        self ,
        images = None ,
        text = None ,
        add_special_tokens = True ,
        padding = False ,
        truncation = None ,
        max_length = None ,
        stride = 0 ,
        pad_to_multiple_of = None ,
        return_attention_mask = None ,
        return_overflowing_tokens = False ,
        return_special_tokens_mask = False ,
        return_offsets_mapping = False ,
        return_token_type_ids = False ,
        return_length = False ,
        verbose = True ,
        return_tensors = None ,
        **kwargs ,
    ) ->BatchEncoding:
        """Tokenise *text* and/or preprocess *images*; at least one required."""
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Union of tokenizer and image-processor input names, de-duplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 714
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_UpperCamelCase = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    """Builds tiny Autoformer configs and random inputs for the unit tests below.

    Fixes vs. the previous revision: the ``__init__`` signature no longer
    repeats one placeholder parameter name (a SyntaxError), the constructor
    actually stores its arguments on ``self`` (``get_config`` reads them), each
    method gets its real name instead of all shadowing ``UpperCamelCase__``,
    and the class is named ``AutoformerModelTester`` as the test case at the
    bottom of the file expects.
    """

    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],  # noqa: B006 — never mutated, passed through to the config
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        # Sequence lengths derived from the config geometry.
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        """Return a small AutoformerConfig built from the tester's attributes."""
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        """Random past/future tensors matching the shapes the model expects."""
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Round-trip encoder and decoder through save/load and compare outputs."""
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for Autoformer.

    Fixes vs. the previous revision: the mixin bases are the imported
    ``ModelTesterMixin``/``PipelineTesterMixin`` (the old base name was never
    defined), the class attributes get their real ``ModelTesterMixin`` names
    instead of all rebinding one placeholder, every test method gets its real
    name (they all shadowed ``UpperCamelCase__``), and undefined ``A_``
    references inside method bodies are restored to the locals they stood for.
    """

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        # from_pretrained must not report missing keys after a save/load round trip.
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason='Model has no tokens embeddings')
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, 'forward'))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                'past_values',
                'past_time_features',
                'past_observed_mask',
                'static_categorical_features',
                'static_real_features',
                'future_values',
                'future_time_features',
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('future_observed_mask')

            expected_arg_names.extend(
                [
                    'decoder_attention_mask',
                    'head_mask',
                    'decoder_head_mask',
                    'cross_attn_head_mask',
                    'encoder_outputs',
                    'past_key_values',
                    'output_hidden_states',
                    'output_attentions',
                    'use_cache',
                    'return_dict',
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, 'seq_length', None)
        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_len)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        d_model = getattr(self.model_tester, 'd_model', None)
        num_attention_heads = getattr(self.model_tester, 'num_attention_heads', None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a reference batch from the Hub and load it onto the test device.

    Fixes vs. the previous revision: the function is named ``prepare_batch``
    (the integration tests below call it by that name) and ``map_location``
    receives ``torch_device`` instead of the filename.
    """
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch', filename=filename, repo_type='dataset')
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    """Slow tests comparing pretrained-model outputs against reference slices.

    Fixes vs. the previous revision: undefined locals (``model``, ``batch``,
    ``output``) are actually bound, the undefined ``A_`` tolerance is replaced
    by the file's 1e-4 tolerance, and each test method gets its own name
    instead of all shadowing ``UpperCamelCase__``.
    """

    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch['past_values'],
                past_time_features=batch['past_time_features'],
                past_observed_mask=batch['past_observed_mask'],
                static_categorical_features=batch['static_categorical_features'],
                future_values=batch['future_values'],
                future_time_features=batch['future_time_features'],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))  # TOLERANCE

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'],
                past_time_features=batch['past_time_features'],
                past_observed_mask=batch['past_observed_mask'],
                static_categorical_features=batch['static_categorical_features'],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))  # TOLERANCE

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['static_categorical_features'],
                past_time_features=batch['past_time_features'],
                past_values=batch['past_values'],
                future_time_features=batch['future_time_features'],
                past_observed_mask=batch['past_observed_mask'],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 583
| 0
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase(ProcessorMixin):
    """CLIPSeg-style processor combining a ViT image processor and CLIP tokenizer.

    Fixes vs. the previous revision: the base class is the imported
    ``ProcessorMixin`` (the old base name was never defined), the three class
    attributes get their real names, signatures no longer repeat a single
    placeholder parameter (a SyntaxError), the deprecation warnings pass
    ``FutureWarning`` as their category, and the locals returned
    (``encoding``, ``image_features``, ``prompt_features``) are actually bound.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """Encode text, a visual prompt, and/or images for the model.

        Exactly one of ``text``/``visual_prompt`` may be given; either can be
        combined with ``images``.

        Raises:
            ValueError: if no input is given, or both text and visual prompt are.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 499
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

# Path of the Jinja model-card template shipped next to this module.
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / '''model_card_template.md'''
# Random id identifying this Python session in telemetry payloads.
# NOTE(review): `uuida` mirrors the garbled import above; the stdlib uuid
# module exposes `uuid4` — confirm the import line upstream.
SESSION_ID = uuida().hex
# Opt-out switches read from the environment.
HF_HUB_OFFLINE = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the running session.

    Fixes vs. the previous revision: the accumulator/argument locals are
    bound consistently and the ``isinstance`` checks test against ``dict`` /
    ``str`` instead of comparing the argument with itself.
    """
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'; torch/{_torch_version}'
    if is_flax_available():
        ua += f'; jax/{_jax_version}'
        ua += f'; flax/{_flax_version}'
    if is_onnx_available():
        ua += f'; onnxruntime/{_onnxruntime_version}'
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    """Return the fully-qualified ``namespace/model_id`` repo name.

    When ``organization`` is omitted, the namespace is the username resolved
    from ``token`` (falling back to the locally stored token).
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
def create_model_card(args, model_name):
    """Render a README.md model card from training ``args`` and save it.

    Fixes vs. the previous revision: the signature no longer repeats one
    placeholder name (a SyntaxError), the intermediate locals are bound, the
    template path points at the packaged template, and the two Adam betas are
    passed under distinct keyword names.
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    # Only the main process writes the card in distributed runs.
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        # Mirrors the module-level MODEL_CARD_TEMPLATE_PATH constant.
        template_path=Path(__file__).parent / "model_card_template.md",
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a resolved cache filename, if possible.

    Returns ``commit_hash`` unchanged when given, otherwise parses the
    ``snapshots/<hash>/`` segment from ``resolved_file``; ``None`` when the
    path has no such segment or the segment is not a valid commit hash.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
# Pre-v0.14 cache location that `move_cache` migrates from.
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Move cached blob files from the pre-v0.14 cache layout to the new one.

    Each moved blob is replaced by a symlink pointing at its new location so
    older diffusers versions keep working. Defaults to migrating
    ``old_diffusers_cache`` into ``DIFFUSERS_CACHE``.
    """
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time cache migration, versioned through a marker file in the cache dir.
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
            '''existing cached models. This is a one-time operation, you can interrupt it or run it '''
            '''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
        )
        try:
            move_cache()
        except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
            logger.error(
                f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
                '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
                '''message and we will do our best to help.'''
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, '''w''') as f:
            f.write('''1''')
    except Exception:
        logger.warning(
            f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
            '''the directory exists and can be written to.'''
        )
def A_ ( snake_case_ : str ,snake_case_ : Optional[str] = None ):
'''simple docstring'''
if variant is not None:
UpperCamelCase : Any = weights_name.split(""".""" )
UpperCamelCase : Optional[int] = splits[:-1] + [variant] + splits[-1:]
UpperCamelCase : int = """.""".join(snake_case_ )
return weights_name
def A_ ( snake_case_ : int ,*,
snake_case_ : Dict ,snake_case_ : Optional[Any] ,snake_case_ : Any ,snake_case_ : str ,snake_case_ : List[str] ,snake_case_ : Dict ,snake_case_ : List[str] ,snake_case_ : str ,snake_case_ : Dict ,snake_case_ : Tuple ,snake_case_ : Union[str, Any]=None ,):
'''simple docstring'''
UpperCamelCase : str = str(snake_case_ )
if os.path.isfile(snake_case_ ):
return pretrained_model_name_or_path
elif os.path.isdir(snake_case_ ):
if os.path.isfile(os.path.join(snake_case_ ,snake_case_ ) ):
# Load from a PyTorch checkpoint
UpperCamelCase : Optional[Any] = os.path.join(snake_case_ ,snake_case_ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(snake_case_ ,snake_case_ ,snake_case_ ) ):
UpperCamelCase : List[Any] = os.path.join(snake_case_ ,snake_case_ ,snake_case_ )
return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(snake_case_ ).base_version ) >= version.parse("""0.20.0""" )
):
try:
UpperCamelCase : List[Any] = hf_hub_download(
snake_case_ ,filename=_add_variant(snake_case_ ,snake_case_ ) ,cache_dir=snake_case_ ,force_download=snake_case_ ,proxies=snake_case_ ,resume_download=snake_case_ ,local_files_only=snake_case_ ,use_auth_token=snake_case_ ,user_agent=snake_case_ ,subfolder=snake_case_ ,revision=revision or commit_hash ,)
warnings.warn(
f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' ,snake_case_ ,)
return model_file
except: # noqa: E722
warnings.warn(
f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case_ ,snake_case_ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case_ ,snake_case_ )}\' so that the correct variant file can be added.' ,snake_case_ ,)
try:
# 2. Load model file as usual
UpperCamelCase : int = hf_hub_download(
snake_case_ ,filename=snake_case_ ,cache_dir=snake_case_ ,force_download=snake_case_ ,proxies=snake_case_ ,resume_download=snake_case_ ,local_files_only=snake_case_ ,use_auth_token=snake_case_ ,user_agent=snake_case_ ,subfolder=snake_case_ ,revision=revision or commit_hash ,)
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"""this model name. Check the model page at """
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
| 499
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase ={
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =[
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 255
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
# Module logger. The original bound this to the same name as the docstring
# constant below, which immediately clobbered it; give it its own name.
logger = get_logger(__name__)
# Shared `__call__` docstring injected via `add_start_docstrings`.
UpperCAmelCase =R"\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class lowerCamelCase__ :
    """Abstract base class for all logits processors applied during generation."""

    @add_start_docstrings(UpperCAmelCase )
    def __call__(self, input_ids, scores) -> jnp.ndarray:
        # The original declared two parameters with the same name (a
        # SyntaxError) and decorated with an undefined name; restored to the
        # module docstring constant and distinct parameter names.
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class lowerCamelCase__ :
    """Abstract base class for all logits warpers applied during multinomial sampling."""

    @add_start_docstrings(UpperCAmelCase )
    def __call__(self, input_ids, scores) -> jnp.ndarray:
        # Duplicate parameter names and an undefined decorator argument were
        # restored; the contract is unchanged: subclasses must override.
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """List of logits processors applied sequentially to the scores."""

    @add_start_docstrings(UpperCAmelCase )
    def __call__(self, input_ids, scores, cur_len, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            # Processors taking more than (input_ids, scores, cur_len) receive
            # their extra arguments through **kwargs; validate they are present.
            if len(function_args ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f'Make sure that all the required parameters: {list(function_args.keys() )} for '
                        f'{processor.__class__} are passed to the logits processor.' )
                scores = processor(input_ids, scores, cur_len, **kwargs )
            else:
                scores = processor(input_ids, scores, cur_len )
        return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Logits warper that rescales scores by ``1 / temperature``.

    Args:
        temperature (float): strictly positive value; >1 flattens the
            distribution, <1 sharpens it.
    """

    def __init__(self, temperature):
        if not isinstance(temperature, float ) or not (temperature > 0):
            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' )
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Nucleus (top-p) filtering: keeps the smallest set of highest-probability
    tokens whose cumulative probability reaches ``top_p``; all other tokens are
    set to ``filter_value``.
    """

    def __init__(self, top_p, filter_value = -float("Inf" ), min_tokens_to_keep = 1):
        if not isinstance(top_p, float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
        if not isinstance(min_tokens_to_keep, int ) or (min_tokens_to_keep < 1):
            raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        # Full descending sort (k = vocab size) so we can cut on cumulative probability.
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1] )
        mask_scores = jnp.full_like(scores, self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores )
        # Undo the sort so scores line up with the original vocabulary order.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores )[-1]
        return next_scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Top-k filtering: keeps only the ``top_k`` highest-scoring tokens, setting
    every other token's score to ``filter_value``.
    """

    def __init__(self, top_k, filter_value = -float("Inf" ), min_tokens_to_keep = 1):
        if not isinstance(top_k, int ) or top_k <= 0:
            raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' )
        self.top_k = max(top_k, min_tokens_to_keep )
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value )
        topk = min(self.top_k, scores.shape[-1] )  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk )
        # Per-row offsets so (batch, topk) indices address the flat buffer.
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None], (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size, vocab_size )
        return next_scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Forces ``bos_token_id`` to be the first generated token."""

    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf" ) )
        # Non-zero only at the first generation step (cur_len == 1).
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0 ), scores )
        return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Forces ``eos_token_id`` as the last token when ``max_length`` is reached."""

    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf" ) )
        # Non-zero only at the last step (cur_len == max_length - 1).
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0 ), scores )
        return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Suppresses ``eos_token_id`` until at least ``min_length`` tokens are generated."""

    def __init__(self, min_length, eos_token_id):
        if not isinstance(min_length, int ) or min_length < 0:
            raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' )
        if not isinstance(eos_token_id, int ) or eos_token_id < 0:
            raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1 )
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf" ) ), scores )
        return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Suppresses ``begin_suppress_tokens`` at the ``begin_index`` generation step."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        # Non-zero only when cur_len == begin_index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ), scores )
        return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Unconditionally suppresses the given tokens at every generation step."""

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens )

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
        return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Forces specific tokens at specific generation indices, XLA-compatibly.

    Args:
        force_token_map: mapping ``{generation index: token id}``; indices
            without a forced token are stored as -1.
    """

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        # NOTE: the original spelled the dtype as the garbled `jnp.intaa`; int32
        # is the standard token-id dtype here.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1), dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            # All -inf except a single zero-score slot at the forced token,
            # so it is chosen with probability 1.
            new_scores = jnp.ones_like(scores, dtype=scores.dtype ) * -float("inf" )
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token) )
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # Past the forced prefix: leave scores unchanged.
            lambda: scores,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len ),
                lambda: scores, ), )
        return scores
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
    """Whisper timestamp logits processor: enforces the pairing rules of
    timestamp tokens and samples a timestamp when its total probability
    exceeds that of any single text token.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        # Every id >= timestamp_begin is a timestamp token.
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index" ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )

        def handle_pairs(input_ids_k, scores_k):
            # Timestamps come in pairs: after a single timestamp only another
            # timestamp may follow; after a pair, only text may follow.
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False, )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp, )
            return jnp.where(
                last_was_timestamp, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("inf" ) ), scores_k.at[: self.eos_token_id].set(-float("inf" ) ), ), scores_k, )

        scores = jax.vmap(handle_pairs )(input_ids, scores )

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False, )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf" ) ), scores, )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1 )

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("inf" ) ), scores_k, )

        scores = jax.vmap(handle_cumulative_probs )(logprobs, scores )
        return scores
| 255
| 1
|
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( base_url, params ):
    """Fetch a Google Scholar lookup page and return its citation-count text.

    NOTE(review): the module imports BeautifulSoup from `bsa`, which looks
    like a garbled `bs4` — confirm the import before relying on this.

    Args:
        base_url: Scholar lookup endpoint URL.
        params: query parameters forwarded to `requests.get`.

    Returns:
        Text of the third anchor in the result's action row — presumably the
        "Cited by N" link; verify against the live page markup.
    """
    # The original declared both parameters with the same name (SyntaxError);
    # restored to distinct names matching their use below.
    soup = BeautifulSoup(requests.get(base_url, params=params ).content, "html.parser" )
    result_div = soup.find("div", attrs={"class": "gs_ri"} )
    anchors = result_div.find("div", attrs={"class": "gs_fl"} ).find_all("a" )
    return anchors[2].get_text()


# Descriptive alias; the __main__ block refers to the function by this name.
get_citation = lowerCAmelCase_
if __name__ == "__main__":
_snake_case : Union[str, Any] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 81
|
def knapsack(weights, values, number_of_items, max_weight, index):
    """Recursive 0/1 knapsack: maximum value from items[index:] within capacity.

    Args:
        weights: item weights.
        values: item values (parallel to `weights`).
        number_of_items: total number of items considered.
        max_weight: remaining weight capacity.
        index: index of the item currently being decided.

    Returns:
        The maximum achievable total value.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    # Base case: no items left to decide.
    if index == number_of_items:
        return 0
    # Option 1 (computed below): take the current item if it fits.
    ans1 = 0
    # Option 2: skip the current item.
    ans2 = knapsack(weights, values, number_of_items, max_weight, index + 1 )
    if weights[index] <= max_weight:
        ans1 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1 )
    return max(ans1, ans2 )


# Preserve the original (obfuscated) public name as an alias; the original
# body recursed through the undefined name `knapsack`, now the real one.
__lowerCAmelCase = knapsack
if __name__ == "__main__":
import doctest
doctest.testmod()
| 226
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Tokenizer checkpoints the test suite iterates over; the tests below refer
# to this list as TOKENIZER_CHECKPOINTS (previously undefined).
TOKENIZER_CHECKPOINTS = ["""gpt2"""]
# Checkpoint whose config seeds the tiny LM head model in ModelToSave.
TINY_MODEL_CHECKPOINT = """gpt2"""
# Keep the original (obfuscated) name bound to the same final value.
_UpperCAmelCase = TINY_MODEL_CHECKPOINT
if is_tf_available():

    class lowerCAmelCase ( tf.Module ):
        """Bundles a TF tokenizer with a tiny GPT-2 LM head so the pair can be
        exported and reloaded as a SavedModel in the tests below.
        """

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # Build a tiny model from the checkpoint's config only (no weights
            # download). `_UpperCAmelCase` is the module-level checkpoint name.
            config = AutoConfig.from_pretrained(_UpperCAmelCase )
            self.model = TFGPTaLMHeadModel.from_config(config )

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='text' ),) )
        def serving(self, text):
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['input_ids'].to_tensor()
            # NOTE(review): dtype was garbled as `tf.intaa`; int32 is assumed
            # for the attention mask — confirm.
            input_mask = tf.cast(input_ids_dense > 0, tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask )['logits']
            return outputs

    # The tests below refer to this class as `ModelToSave`.
    ModelToSave = lowerCAmelCase
@require_tf
@require_keras_nlp
class lowerCAmelCase ( unittest.TestCase ):
    """Checks that the in-graph TFGPT2Tokenizer matches the reference
    GPT2Tokenizer and survives compilation, SavedModel round-trips,
    config round-trips, and padding.

    NOTE(review): the original defined every method under the single name
    `A_`, so each definition overwrote the previous one and no test could
    run; the names below were reconstructed from each body's intent.
    """

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1] ) )

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    # NOTE(review): dtype was garbled as `tf.intaa`; int64 is
                    # assumed for comparing token ids — confirm.
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64 ) == tf_outputs_values ) )

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            # NOTE(review): the assignment target was lost to obfuscation;
            # setting pad_token_id matches the padded calls below — confirm.
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs, max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
                assert out_length == max_length
| 708
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase ( __UpperCamelCase ):
    """Processor wrapping an OwlViT image processor and a CLIP tokenizer into a
    single callable that prepares text queries, query images, and images.

    NOTE(review): the three class attributes below were all bound to one
    obfuscated name; they were restored to the ProcessorMixin attribute
    names implied by their values — confirm against the mixin contract.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """OwlViTImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )  # NOTE(review): warning category assumed — confirm
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor, tokenizer )

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenizes `text` queries (padding each sample to the batch-wide
        maximum number of queries), preprocesses `query_images` and `images`,
        and returns a `BatchEncoding` with whichever fields were provided.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )
        if text is not None:
            if isinstance(text, str ) or (isinstance(text, List ) and not isinstance(text[0], List )):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs )]
            elif isinstance(text, List ) and isinstance(text[0], List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            # Concatenate the per-sample encodings along the batch axis in the
            # requested tensor framework.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0 )
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0 )
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0 )
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0 )
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ), tensor_type=return_tensors )

    def post_process(self, *args, **kwargs):
        # Delegate to the image processor.
        return self.image_processor.post_process(*args, **kwargs )

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs )

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs )

    def batch_decode(self, *args, **kwargs):
        # Delegate to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs )

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs )

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
| 188
| 0
|
def solution(numerator: int = 1, digit: int = 1_0_0_0) -> int:
    """Project Euler 26: the d <= `digit` whose fraction numerator/d has the
    longest recurring remainder cycle in its decimal expansion.

    Args:
        numerator: fraction numerator (and first divisor tried).
        digit: upper bound on the divisor.

    Returns:
        The divisor producing the longest remainder cycle.

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1 ):
        has_been_divided = []  # remainders seen so far for this divisor
        now_divide = numerator
        for _ in range(1, digit + 1 ):
            if now_divide in has_been_divided:
                # Cycle detected: its length is the number of distinct remainders.
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                # Long-division step: next remainder.
                now_divide = now_divide * 1_0 % divide_by_number
    return the_digit


# Preserve the original (obfuscated) public name as an alias.
__lowerCamelCase = solution
# Tests
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 176
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A (SCREAMING_SNAKE_CASE ):
    """Processor wrapping a Wav2Vec2 feature extractor and a CTC tokenizer:
    audio goes through the feature extractor, text through the tokenizer.

    NOTE(review): the two class attributes were bound to one obfuscated
    name; restored to the ProcessorMixin attribute names implied by their
    values — confirm against the mixin contract.
    """

    feature_extractor_class = '''Wav2Vec2FeatureExtractor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer )
        # `current_processor` is swapped to the tokenizer inside the (deprecated)
        # as_target_processor() context.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both components; fall back to explicit Wav2Vec2 classes for old
        configs that lack a `tokenizer_class` attribute.
        """
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs )
        except OSError:
            warnings.warn(
                f'Loading a tokenizer inside {cls.__name__} from a config that does not'
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """, FutureWarning, )  # NOTE(review): category assumed — confirm
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs )
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer )

    def __call__(self, *args, **kwargs):
        """Dispatch `audio` to the feature extractor and `text` to the tokenizer;
        when both are given, attach the tokenized ids as `labels`.
        """
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""", None )
        sampling_rate = kwargs.pop("""sampling_rate""", None )
        text = kwargs.pop("""text""", None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["""input_ids"""]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` via the feature extractor and `labels` via the
        tokenizer, mirroring __call__'s dispatch.
        """
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs )
        input_features = kwargs.pop("""input_features""", None )
        labels = kwargs.pop("""labels""", None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["""input_ids"""]
            return input_features

    def batch_decode(self, *args, **kwargs):
        # Delegate to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs )

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs )

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route plain calls to the tokenizer for
        label processing.
        """
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 176
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __magic_name__(PretrainedConfig):
    """Configuration for the DPT (Dense Prediction Transformer) model.

    Holds the ViT encoder hyper-parameters, the reassemble/fusion neck
    settings, the auxiliary semantic-segmentation head options and, in hybrid
    mode, a nested BiT backbone configuration.
    """

    # `to_dict` reads `self.__class__.model_type`, so this attribute name is load-bearing.
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            # Hybrid DPT embeds patches with a convolutional BiT backbone.
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )

            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize this config to a plain dict, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 700
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__(ProcessorMixin):
    """Processor combining a LayoutLMv2 image processor with a LayoutXLM tokenizer.

    The image processor (optionally running OCR) yields pixel values plus words
    and bounding boxes; the tokenizer then encodes the text/boxes/labels. The
    merged encoding carries the images under the ``"image"`` key.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the legacy `feature_extractor` kwarg as an alias for `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and merge their outputs."""
        # Reject argument combinations that conflict with OCR mode.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once for every overflowing chunk produced from it."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 106
| 0
|
# Public API of the download sub-package; `from ... import *` exports exactly these.
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 64
|
from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 64
| 1
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract timing information from a single GitHub Actions job payload.

    Args:
        job: one entry of the GitHub Actions ``jobs`` API response; must carry
            ``started_at`` and ``completed_at`` ISO timestamps.

    Returns:
        dict with ``started_at``, ``completed_at`` and ``duration`` (minutes, rounded).
    """
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Fetch all jobs of a workflow run and their durations.

    Args:
        workflow_run_id: GitHub Actions workflow run id.
        token: optional GitHub token for authenticated (higher-rate) requests.

    Returns:
        dict mapping job name to the dict produced by `extract_time_from_single_job`;
        empty dict on any fetch/parse error (best-effort, errors are printed).
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The API caps pages at 100 entries; walk the remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # Sort jobs by duration, longest first, for readable reporting.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 705
|
# Lint as: python3
import itertools
import os
import re
UpperCAmelCase_ = re.compile(R'([A-Z]+)([A-Z][a-z])')
UpperCAmelCase_ = re.compile(R'([a-z\d])([A-Z])')
UpperCAmelCase_ = re.compile(R'(?<!_)_(?!_)')
UpperCAmelCase_ = re.compile(R'(_{2,})')
UpperCAmelCase_ = R'^\w+(\.\w+)*$'
UpperCAmelCase_ = R'<>:/\|?*'
def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case (double underscores are kept)."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    """Return the snake-case filename prefix for a bare dataset name."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    """Return the ``<dataset>-<split>`` filename prefix, validating both parts."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every file of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the concrete filenames of a dataset split.

    With `shard_lengths`, one ``-SSSSS-of-NNNNN``-suffixed name per shard is
    produced; otherwise a single-element list is returned.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 369
| 0
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build an LXMERT model from a config, load TF weights and dump a PyTorch checkpoint.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: destination for the converted ``state_dict``.
    """
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 68
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public symbols. Optional
# backends extend it only when their dependencies are importable.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 406
| 0
|
from ..utils import DummyObject, requires_backends
# Dummy placeholders raised in place of the real implementations when the
# "torch", "transformers" and "onnx" backends are not installed. The
# `DummyObject` metaclass (imported above) reads `_backends` and makes any
# attribute access raise a helpful ImportError.
# NOTE(review): the original distinct class names were collapsed to `A_`
# upstream of this file — restore the real pipeline names when known.


class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 337
|
from ..utils import DummyObject, requires_backends
# Dummy placeholders raised in place of the real implementations when the
# "sentencepiece" backend is not installed. The `DummyObject` metaclass
# (imported above) reads `_backends` and makes any attribute access raise a
# helpful ImportError.
# NOTE(review): the original distinct class names were collapsed to `A_`
# upstream of this file — restore the real tokenizer names when known.


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
| 337
| 1
|
'''simple docstring'''
def _snake_case ( _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
lowerCAmelCase = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings when executed as a script.
    import doctest
    doctest.testmod()
| 433
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# NOTE(review): obfuscation collapsed three distinct module constants
# (presumably `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`)
# onto the single name ``UpperCAmelCase`` — each assignment shadows the
# previous one, and the tokenizer class below reads the original names
# (`logger`, `VOCAB_FILES_NAMES`); restore them upstream — TODO confirm.
UpperCAmelCase = logging.get_logger(__name__)
# Filename of the SentencePiece model inside a saved tokenizer directory.
UpperCAmelCase = {'vocab_file': 'spiece.model'}
# Map from checkpoint name to the hosted vocab file URL.
UpperCAmelCase = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class __snake_case( _lowerCAmelCase ):
    """CPM tokenizer: SentencePiece subword tokenization preceded by jieba word
    segmentation, with XLNet-style special-token layout (``x [SEP] [CLS]``).

    NOTE(review): obfuscation damage — every ``__init__`` parameter is named
    ``A_`` (duplicate argument names are a SyntaxError in Python) and local /
    attribute assignments were collapsed to ``lowerCAmelCase = ...``, so the
    ``self.*`` attributes read elsewhere in the class (``self.sp_model``,
    ``self.do_lower_case``, ...) are never actually set here. The intended
    parameter list (vocab_file, do_lower_case, remove_space, keep_accents,
    bos/eos/unk/sep/pad/cls/mask tokens, additional_special_tokens,
    sp_model_kwargs) is still readable from the ``super().__init__`` call;
    restore the names from upstream before running — TODO confirm against
    transformers' models/cpm/tokenization_cpm.py.
    """
    def __init__( self , A_ , A_=False , A_=True , A_=False , A_="<s>" , A_="</s>" , A_="<unk>" , A_="<sep>" , A_="<pad>" , A_="<cls>" , A_="<mask>" , A_=["<eop>", "<eod>"] , A_ = None , **A_ , ) -> None:
        # Mask token behaves like a normal word, i.e. strips the space before it.
        lowerCAmelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
        lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
        # presumably the XLNet-style pad token type id — TODO confirm
        lowerCAmelCase = 3
        lowerCAmelCase = do_lower_case
        lowerCAmelCase = remove_space
        lowerCAmelCase = keep_accents
        lowerCAmelCase = vocab_file
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(A_ )
        # jieba is an optional dependency used for Chinese word segmentation;
        # fail loudly with install instructions rather than at first use.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        lowerCAmelCase = jieba
        # Translation table mapping space/newline to visible placeholders
        # (\u2582, \u2583) so they survive SentencePiece; reversed in _decode.
        lowerCAmelCase = str.maketrans(""" \n""" , """\u2582\u2583""" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def __snake_case ( self ) -> List[Any]:
        # Size of the underlying SentencePiece vocabulary.
        return len(self.sp_model )
    def __snake_case ( self ) -> str:
        # Full vocab mapping: SentencePiece pieces plus any added tokens.
        lowerCAmelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> str:
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        lowerCAmelCase = self.__dict__.copy()
        lowerCAmelCase = None
        return state
    def __setstate__( self , A_ ) -> List[Any]:
        lowerCAmelCase = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            lowerCAmelCase = {}
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def __snake_case ( self , A_ ) -> str:
        # Whitespace normalization, quote unification, optional accent stripping
        # and lower-casing, applied before SentencePiece encoding.
        if self.remove_space:
            lowerCAmelCase = """ """.join(inputs.strip().split() )
        else:
            lowerCAmelCase = inputs
        lowerCAmelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            lowerCAmelCase = unicodedata.normalize("""NFKD""" , A_ )
            lowerCAmelCase = """""".join([c for c in outputs if not unicodedata.combining(A_ )] )
        if self.do_lower_case:
            lowerCAmelCase = outputs.lower()
        return outputs
    def __snake_case ( self , A_ ) -> List[str]:
        # Tokenize; re-splits pieces like "9," so digits and the trailing comma
        # become separate pieces (XLNet convention).
        lowerCAmelCase = self.preprocess_text(A_ )
        lowerCAmelCase = self.sp_model.encode(A_ , out_type=A_ )
        lowerCAmelCase = []
        for piece in pieces:
            if len(A_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(A_ , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCAmelCase = cur_pieces[1:]
                    else:
                        lowerCAmelCase = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(A_ )
            else:
                new_pieces.append(A_ )
        return new_pieces
    def __snake_case ( self , A_ ) -> List[str]:
        # Token (piece) -> id via SentencePiece.
        return self.sp_model.PieceToId(A_ )
    def __snake_case ( self , A_ ) -> int:
        # Id -> token (piece) via SentencePiece.
        return self.sp_model.IdToPiece(A_ )
    def __snake_case ( self , A_ ) -> Any:
        # Join pieces and turn the SentencePiece underline back into spaces.
        lowerCAmelCase = """""".join(A_ ).replace(A_ , """ """ ).strip()
        return out_string
    def __snake_case ( self , A_ , A_ = None ) -> List[int]:
        # XLNet layout: ``x [SEP] [CLS]`` / ``a [SEP] b [SEP] [CLS]``.
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def __snake_case ( self , A_ , A_ = None , A_ = False ) -> List[int]:
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
        if token_ids_a is not None:
            return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1, 1]
        return ([0] * len(A_ )) + [1, 1]
    def __snake_case ( self , A_ , A_ = None ) -> List[int]:
        # Segment ids: 0 for the first sequence, 1 for the second, 2 for [CLS].
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def __snake_case ( self , A_ , A_ = None ) -> Tuple[str]:
        # Save the SentencePiece model file into `save_directory`; copies the
        # original file when present, otherwise serializes the loaded proto.
        if not os.path.isdir(A_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCAmelCase = os.path.join(
            A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , A_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(A_ , """wb""" ) as fi:
                lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(A_ )
        return (out_vocab_file,)
    def __snake_case ( self , *A_ , **A_ ) -> Optional[int]:
        # Undo the space/newline placeholder substitution applied before encoding.
        lowerCAmelCase = super()._decode(*A_ , **A_ )
        lowerCAmelCase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
| 433
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __lowerCamelCase :
    """Test helper that builds a tiny CTRL config and synthetic input batches.

    NOTE(review): obfuscation collapsed all ``__init__`` parameter names to
    ``UpperCAmelCase`` (duplicate argument names are a SyntaxError) and all
    attribute assignments to ``lowercase_ = ...`` — so ``self.batch_size`` etc.
    are never actually set. The intended names are readable from the
    right-hand sides and from the attribute reads in the methods below;
    restore them upstream — TODO confirm against tests/models/ctrl.
    """
    def __init__( self , UpperCAmelCase , UpperCAmelCase=14 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> int:
        '''simple docstring'''
        lowercase_ = parent
        lowercase_ = batch_size
        lowercase_ = seq_length
        lowercase_ = is_training
        lowercase_ = use_token_type_ids
        lowercase_ = use_input_mask
        lowercase_ = use_labels
        lowercase_ = use_mc_token_ids
        lowercase_ = vocab_size
        lowercase_ = hidden_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = intermediate_size
        lowercase_ = hidden_act
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = max_position_embeddings
        lowercase_ = type_vocab_size
        lowercase_ = type_sequence_label_size
        lowercase_ = initializer_range
        lowercase_ = num_labels
        lowercase_ = num_choices
        lowercase_ = scope
        lowercase_ = self.vocab_size - 1
    # Builds a config plus random id/mask/label tensors for one test case.
    def A__ ( self ) -> str:
        '''simple docstring'''
        lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ = None
        if self.use_input_mask:
            lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ = None
        if self.use_token_type_ids:
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase_ = None
        if self.use_mc_token_ids:
            lowercase_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        lowercase_ = None
        lowercase_ = None
        lowercase_ = None
        if self.use_labels:
            lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
        lowercase_ = self.get_config()
        lowercase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def A__ ( self ) -> int:
        '''simple docstring'''
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    # Forward pass through the bare CTRLModel; checks hidden-state shape and
    # that one past_key_values entry per layer is returned.
    def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ) -> Any:
        '''simple docstring'''
        lowercase_ = CTRLModel(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase )
        model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
        lowercase_ = model(__lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    # LM head: checks scalar loss and full-vocab logits shape.
    def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ) -> Dict:
        '''simple docstring'''
        lowercase_ = CTRLLMHeadModel(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        lowercase_ = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A__ ( self ) -> List[Any]:
        '''simple docstring'''
        lowercase_ = self.prepare_config_and_inputs()
        (
            lowercase_
        ) = config_and_inputs
        lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
    # Sequence-classification head: checks (batch, num_labels) logits shape.
    def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ) -> str:
        '''simple docstring'''
        lowercase_ = self.num_labels
        lowercase_ = CTRLForSequenceClassification(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowercase_ = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """CTRL model test suite (common model tests + generation + pipeline mixins).

    NOTE(review): obfuscation collapsed the three mixin base names to
    ``lowerCamelCase__`` (presumably ModelTesterMixin, GenerationTesterMixin,
    PipelineTesterMixin from the imports above) and all class attributes to
    ``lowerCAmelCase__`` — each assignment shadows the previous one. Method
    bodies also reference names that were clobbered (e.g. ``CTRLModelTester``
    is this file's helper class, itself renamed ``__lowerCamelCase``).
    Restore the original identifiers upstream — TODO confirm.
    """
    lowerCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    lowerCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
    lowerCAmelCase__ = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCAmelCase__ = True
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    # Returns True for pipeline test cases that are known to fail and must be skipped.
    def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def A__ ( self ) -> Any:
        '''simple docstring'''
        lowercase_ = CTRLModelTester(self )
        lowercase_ = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 )
    def A__ ( self ) -> str:
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def A__ ( self ) -> Dict:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def A__ ( self ) -> Tuple:
        '''simple docstring'''
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*__lowerCamelCase )
    def A__ ( self ) -> List[Any]:
        '''simple docstring'''
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def A__ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        pass
    @slow
    def A__ ( self ) -> str:
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ = CTRLModel.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
    @unittest.skip("The model doesn\'t support left padding" ) # and it's not used enough to be worth fixing :)
    def A__ ( self ) -> List[Any]:
        '''simple docstring'''
        pass
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
    """Slow integration test: greedy generation from the pretrained ``ctrl``
    checkpoint must reproduce a fixed token sequence.

    NOTE(review): obfuscation collapsed locals to ``lowercase_`` — the
    generation test below reads ``model``/``input_ids``/``expected_output_ids``
    which are never assigned; restore the local names upstream — TODO confirm.
    """
    def A__ ( self ) -> Optional[Any]:
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def A__ ( self ) -> List[Any]:
        '''simple docstring'''
        lowercase_ = CTRLLMHeadModel.from_pretrained("ctrl" )
        model.to(__lowerCamelCase )
        lowercase_ = torch.tensor(
            [[11859, 0, 1611, 8]] , dtype=torch.long , device=__lowerCamelCase ) # Legal the president is
        lowercase_ = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        lowercase_ = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase )
        self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
| 702
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __lowerCamelCase ( pl.LightningModule ):
    """Minimal LightningModule used only to load a fine-tuned Longformer-QA
    checkpoint: it mirrors the checkpoint's module layout (a Longformer encoder
    plus a 2-label start/end QA head) so ``load_state_dict`` lines up.
    """

    def __init__( self , UpperCAmelCase ) -> None:
        super().__init__()
        # The obfuscated original assigned to throwaway locals
        # (``lowercase_ = model`` with ``model`` undefined), so ``self.model``
        # and ``self.qa_outputs`` — read by the conversion function below —
        # were never set. Restore the attribute assignments.
        self.model = UpperCAmelCase
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def A__ ( self ) -> None:
        """Placeholder (original ``forward``); only the weights matter here."""
        pass
def SCREAMING_SNAKE_CASE_ ( longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str ):
    """Convert a PyTorch-Lightning Longformer-QA checkpoint to a Hugging Face
    ``LongformerForQuestionAnswering`` and save it to `pytorch_dump_folder_path`.

    The obfuscated original declared three identically named parameters (a
    SyntaxError) and referenced undefined locals; the parameter names here are
    restored from the argparse flags and the success message below.

    Args:
        longformer_model: model id of the base Longformer (e.g. ``longformer-base-4096``).
        longformer_question_answering_ckpt_path: path to the Lightning ``.ckpt`` file.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    # Checkpoints may have been trained on GPU; map to CPU for conversion.
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    # NOTE(review): obfuscation assigned the parser to ``SCREAMING_SNAKE_CASE__``
    # but the add_argument calls read ``parser`` (undefined), and the converter
    # is invoked under its original name ``convert_longformer_qa_checkpoint_to_pytorch``
    # while the function above was renamed ``SCREAMING_SNAKE_CASE_`` — restore
    # consistent names upstream.
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--longformer_model""",
        default=None,
        type=str,
        required=True,
        help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
    )
    parser.add_argument(
        """--longformer_question_answering_ckpt_path""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch Lightning Checkpoint.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 601
| 0
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase, _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: Tuple ) -> str:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = emb.weight.shape
lowerCAmelCase = nn.Linear(_lowerCamelCase, _lowerCamelCase, bias=_lowerCamelCase )
lowerCAmelCase = emb.weight.data
return lin_layer
def __magic_name__ ( _lowerCamelCase: Optional[int], _lowerCamelCase: Any="facebook/mbart-large-en-ro", _lowerCamelCase: List[str]=False, _lowerCamelCase: Tuple=False ) -> Dict:
    """Convert a fairseq MBart checkpoint on disk to a Hugging Face
    ``MBartForConditionalGeneration``.

    NOTE(review): obfuscation collapsed all four parameters to
    ``_lowerCamelCase`` (duplicate argument names are a SyntaxError) and all
    locals to ``lowerCAmelCase``, so the body reads undefined names
    (``state_dict``, ``vocab_size``, ``mbart_config``, ``model``) and calls
    the siblings under their original names (``remove_ignore_keys_``,
    ``make_linear_from_emb``) which were themselves renamed ``__magic_name__``.
    Originally: (checkpoint_path, hf_config_path, finetuned, mbart_aa) — load
    the fairseq model state dict, strip bookkeeping keys, size an MBartConfig
    to the checkpoint's embedding table, load the weights, and for fine-tuned
    checkpoints attach an lm_head built from the shared embeddings. Restore
    the names upstream — TODO confirm.
    """
    lowerCAmelCase = torch.load(_lowerCamelCase, map_location='''cpu''' )['''model''']
    remove_ignore_keys_(_lowerCamelCase )
    # Vocab size is taken from the checkpoint so padded vocabularies round-trip.
    lowerCAmelCase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    lowerCAmelCase = MBartConfig.from_pretrained(_lowerCamelCase, vocab_size=_lowerCamelCase )
    if mbart_aa and finetuned:
        # mBART-50 fine-tuned checkpoints use relu activations.
        lowerCAmelCase = '''relu'''
    lowerCAmelCase = state_dict['''decoder.embed_tokens.weight''']
    lowerCAmelCase = MBartForConditionalGeneration(_lowerCamelCase )
    model.model.load_state_dict(_lowerCamelCase )
    if finetuned:
        lowerCAmelCase = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    # NOTE(review): the converter is called under its original name
    # ``convert_fairseq_mbart_checkpoint_from_disk`` (the function above was
    # renamed ``__magic_name__``), and ``args.mbart_aa`` does not match the
    # declared ``--mbart_50`` flag (digit obfuscation) — both would fail at
    # runtime; restore consistent names upstream.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
    )
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--hf_config""",
        default="""facebook/mbart-large-cc25""",
        type=str,
        help="""Which huggingface architecture to use: mbart-large""",
    )
    parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
    parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
    UpperCAmelCase = parser.parse_args()
    UpperCAmelCase = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 535
|
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase: Tuple, _lowerCamelCase: Any ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = [1]
for i in range(2, _lowerCamelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
lowerCAmelCase = []
lowerCAmelCase = list(range(_lowerCamelCase ) )
# Find permutation
while factorials:
lowerCAmelCase = factorials.pop()
lowerCAmelCase , lowerCAmelCase = divmod(_lowerCamelCase, _lowerCamelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings when executed as a script.
    import doctest
    doctest.testmod()
| 535
| 1
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
    """DistilBERT tokenization tests — inherits the full BERT tokenization
    suite and adds a check of the special-token layout.

    NOTE(review): obfuscation residue — the base name ``__A`` is presumably
    ``BertTokenizationTest`` (imported above), the three ``lowerCamelCase_``
    class attributes shadow each other (originally tokenizer_class,
    rust_tokenizer_class, test_rust_tokenizer), and ``lowercase`` used below
    for ``add_special_tokens`` is undefined; restore upstream — TODO confirm.
    """
    lowerCamelCase_ = DistilBertTokenizer
    lowerCamelCase_ = DistilBertTokenizerFast
    lowerCamelCase_ = True
    @slow
    def lowerCAmelCase_ ( self ):
        """Single sequence must encode to [CLS] x [SEP]; a pair to [CLS] x [SEP] y [SEP]."""
        A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
        A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
        A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
        A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
        A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 706
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import wiring for the BioGPT sub-package. The obfuscated original
# assigned everything to the throwaway name ``_UpperCAmelCase`` while
# ``_LazyModule`` below was called with the undefined ``_import_structure``
# (NameError at import), the torch-only branch replaced the whole dict with a
# list, and the lazy module was never installed into ``sys.modules``. This
# restores the standard transformers lazy-init structure.
_import_structure = {
    """configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
    """tokenization_biogpt""": ["""BioGptTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: register the modeling symbols for lazy import
    # instead of clobbering the structure dict.
    _import_structure["""modeling_biogpt"""] = [
        """BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BioGptForCausalLM""",
        """BioGptForTokenClassification""",
        """BioGptForSequenceClassification""",
        """BioGptModel""",
        """BioGptPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny SentencePiece model fixture shared by the tokenizer tests.
# (Annotation corrected: get_tests_dir returns a path string, not an int.)
UpperCamelCase__ : str = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a = XLMRobertaTokenizer(__lowerCamelCase ,keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
a = '''<pad>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) ,__lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) ,__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<s>''' )
self.assertEqual(vocab_keys[1] ,'''<pad>''' )
self.assertEqual(vocab_keys[-1] ,'''<mask>''' )
self.assertEqual(len(__lowerCamelCase ) ,10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,10_02 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
a = XLMRobertaTokenizer(__lowerCamelCase ,keep_accents=__lowerCamelCase )
a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase ,**__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase ,**__lowerCamelCase )
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
a = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__lowerCamelCase ,__lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase ,__lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase ,legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase ,__lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase ,__lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase ,legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase ,__lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCamelCase ,f.name )
a = XLMRobertaTokenizer(f.name ,keep_accents=__lowerCamelCase )
a = pickle.dumps(__lowerCamelCase )
pickle.loads(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = '''I was born in 92000, and this is falsé.'''
a = tokenizer.tokenize(__lowerCamelCase )
a = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
a = rust_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
a = self.get_rust_tokenizer()
a = tokenizer.encode(__lowerCamelCase )
a = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
a = '''Hello World!'''
a = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase ,self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase ,self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase ,model_name='''xlm-roberta-base''' ,revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' ,)
| 387
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    """Shuffle *data* in place with random pairwise swaps and return it.

    Fixes two defects: the function was defined under a placeholder name while the demo
    below calls ``fisher_yates_shuffle``, and the swap assigned the two picked elements to
    throwaway locals instead of back into ``data`` — the list was never actually shuffled.
    """
    for _ in range(len(data)):
        # pick two random positions and exchange their contents
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    # Demo: shuffle an int list and a str list (shuffling happens in place).
    # Fix: both lists were bound to the same throwaway name while the prints
    # below read ``integers`` and ``strings``.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 457
| 0
|
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of perimeters of almost-equilateral Heronian triangles
    (integer sides (n, n, n±1) with integral area) whose perimeter does not exceed
    ``max_perimeter``.

    Fixes: five distinct locals had been collapsed onto one throwaway name, leaving
    ``perimeter``/``perimeters_sum``/``prev_value``/``value``/``i`` undefined, and the
    function was unreachable under the name ``solution`` that the demo below calls.
    """
    prev_value = 1
    value = 2
    perimeter = 0
    perimeters_sum = 0
    i = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # step to the next candidate via the Pell-like recurrence
        prev_value += 2 * value
        value += prev_value
        # sides alternate between the (n, n, n+1) and (n, n, n-1) families
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
    # Print the Project Euler 94 result for the default 10**9 perimeter bound.
    print(F'''{solution() = }''')
| 688
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__(ProcessorMixin):
    r"""
    Processor combining a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor: the image processor (optionally) runs OCR to get words and
    normalized bounding boxes, which are then fed to the tokenizer together with any
    user-supplied text/boxes/word labels.

    Fixes restored from the collapsed automated rename: duplicate parameter names
    (a SyntaxError), the undefined ``feature_extractor`` / ``images_with_overflow``
    locals, the colliding method/attribute names (``get_overflowing_images`` and
    ``image_processor_class`` are referenced by name inside this class), and the
    ``ProcessorMixin`` base class (imported above).
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated spelling of `image_processor`
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor on *images*, then the tokenizer on the (possibly OCR'd)
        words/boxes, and return a single BatchEncoding that also carries the pixel values
        under the ``image`` key."""
        # verify input: user-supplied boxes/word labels conflict with built-in OCR
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.')

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer; when OCR ran, the OCR'd words become the text pair
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features['boxes'],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values, duplicating images for overflowing windows when requested
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['image'] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Duplicate each image once per overflowing window mapped back to its sample."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}""")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Names of the inputs this processor produces."""
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        """Deprecated alias of image_processor_class."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias of image_processor."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 688
| 1
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if num/den keeps its value after "cancelling" the shared digit
    (e.g. 49/98 -> 4/8). Callers must ensure den % 10 != 0 to avoid division by zero.

    Fixes: the function had duplicate (collapsed) parameter names — a SyntaxError — and
    was unreachable under the name the sibling ``fraction_list`` calls.
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list:
    """Return all non-trivial digit-cancelling fractions "num/den" with numerators below
    10**digit_len and two-digit denominators.

    Fixes: the locals ``solutions``/``den``/``last_digit`` were collapsed onto one
    throwaway name (leaving them undefined where read) and the function was unreachable
    under the name ``solution`` calls.
    """
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            # skip trivial cases (equal values, denominators ending in 0)
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"""{num}/{den}""")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(digit_len: int = 2) -> int:
    """Project Euler 33: multiply all curious (digit-cancelling) fractions together and
    return the denominator of the product in lowest terms.

    Fixes: locals collapsed onto one throwaway name and the function unreachable under
    the name the demo below calls.
    """
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        # accumulate the reciprocal so the final int() is the reduced denominator
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
    # Project Euler 33: denominator of the product of the four non-trivial
    # digit-cancelling fractions, given in lowest common terms.
    print(solution())
| 248
|
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND gate: return 1 only when both inputs are 1 (non-zero), else 0.

    Fixes: the definition had duplicate (collapsed) parameter names — a SyntaxError —
    and was unreachable under the name ``and_gate`` used below.
    """
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    """Exercise the full AND truth table; restored under the name the demo calls."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
    # Self-check the truth table, then print each AND combination.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 248
| 1
|
"""Lazy import structure for the Table Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# submodule name -> public names it provides; consumed by _LazyModule below.
# Fix: this dict and the modeling list were bound to throwaway names while
# `_import_structure` was read (undefined) at the bottom, and the _LazyModule
# result was discarded instead of being installed into sys.modules.
_import_structure = {
    '''configuration_table_transformer''': [
        '''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TableTransformerConfig''',
        '''TableTransformerOnnxConfig''',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: expose the modeling classes as well
    _import_structure['''modeling_table_transformer'''] = [
        '''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TableTransformerForObjectDetection''',
        '''TableTransformerModel''',
        '''TableTransformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710
|
"""Lazy import structure for the FocalNet model."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# submodule name -> public names it provides; consumed by _LazyModule below.
# Fix: both assignments were bound to throwaway names while `_import_structure`
# was read (undefined) at the bottom, and the _LazyModule result was discarded
# instead of being installed into sys.modules.
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: expose the modeling classes as well
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 697
| 0
|
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the decryption of *message* under every key.

    Fixes: the locals ``translated``/``num`` were collapsed onto one throwaway name
    (leaving them undefined where read), ``.find`` was passed the whole message instead
    of the current symbol (always returning -1), and the function was unreachable under
    the name ``decrypt`` that ``main`` calls.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ''''''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                # shift the letter back by `key`, wrapping around the alphabet
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                # non-letters pass through unchanged
                translated = translated + symbol
        print(F'''Decryption using Key #{key}: {translated}''')
def main() -> None:
    """Prompt for a message and show all 26 candidate Caesar decryptions.

    Fixes: locals collapsed onto one throwaway name and the function unreachable under
    the name ``main`` that the __main__ guard calls.
    """
    message = input('''Encrypted message: ''')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
    # Run the module doctests, then start the interactive Caesar brute-force tool.
    import doctest
    doctest.testmod()
    main()
| 507
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: logger and the pretrained-config map were both bound to the same name,
# so the second assignment clobbered the logger.
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class a__(PretrainedConfig):
    r"""Configuration for the Donut Swin encoder.

    Fixes restored from the collapsed automated rename: duplicate ``__init__`` parameter
    names (a SyntaxError), the ``self.`` attribute assignments (they were binding
    locals, so the config stored nothing), the duplicate class-attribute name that let
    ``attribute_map`` clobber ``model_type``, and the ``PretrainedConfig`` base class
    (imported above).
    """

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 1
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    """Return a unique filesystem path: a fresh temp directory plus a uuid4 filename
    ending in *suffix*.

    Fixes: the mkdtemp result was bound to a throwaway name while the join used the
    suffix argument as the directory, and ``uuid.uuida`` is not a real function
    (mangled ``uuid.uuid4``). The name/parameter match the ``get_new_path(suffix=...)``
    call site below.
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    """Round-trip tests for AgentAudio (tensor -> wav file -> tensor).

    Restored from the collapsed rename: locals were all bound to one name (leaving
    ``agent_type``/``path`` undefined where read), ``torch.floataa`` is mangled
    ``torch.float64``, and the class/method names collided with the sibling test classes.
    """

    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix='''.wav''')
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    """AgentImage construction from a tensor, a path and a PIL image.

    Restored from the collapsed rename: locals (``path``/``image``/``agent_type``) were
    all bound to one name, and the class/method names collided with the sibling classes.
    """

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / '''000000039769.png'''
        image = Image.open(path)
        agent_type = AgentImage(path)

        # built from a path: to_string points at the very same file
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / '''000000039769.png'''
        image = Image.open(path)
        agent_type = AgentImage(image)

        # built from a PIL image: serialized to a new file, not the original path
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    """AgentText should behave like the wrapped string.

    Restored from the collapsed rename: locals were bound to one name and the final
    assertion compared a name with itself instead of the string with the agent type.
    """

    def test_from_string(self):
        string = '''Hey!'''
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 720
|
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Count token-id occurrences in a binarized MLM dataset and dump a dense
    # per-vocab-id count list for masking-probability smoothing.
    # Fix: every binding (parser/args/data/counter/counts/logger) had been collapsed
    # onto one throwaway name while being read under its real name, and the per-token
    # store had lost its `counts[k]` index.
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(F"""Loading data from {args.data_file}""")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # token_id -> occurrence count, dense list indexed by vocab id
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(F"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 326
| 0
|
"""simple docstring"""
class FlowNetwork:
    """A capacitated directed graph with designated source(s) and sink(s).

    ``graph`` is an adjacency matrix of capacities: ``graph[u][v]`` is the capacity of
    the edge u -> v. Multiple sources/sinks are folded into a single fake super-source /
    super-sink by ``_normalize_graph``.

    Restored from the collapsed automated rename: every ``self.`` attribute assignment
    had been binding a throwaway local, and the class names collided while the demo
    below calls ``FlowNetwork`` / ``PushRelabelExecutor``.
    """

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        """Reduce the network to exactly one source and one sink."""
        # accept a bare index as well as a list of indices
        # (fixed: the original `sources is int` identity check was always False)
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        """Run the configured algorithm and return the maximum flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        # `algorithm` is an executor class; instantiate it bound to this network
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    """Base class for maximum-flow algorithms bound to a FlowNetwork."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        """Run the algorithm once; subsequent calls are no-ops."""
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        # implemented by concrete algorithm subclasses
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Executor whose result is a single maximum-flow value."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Push-relabel maximum-flow with the relabel-to-front selection rule."""

    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        # source starts at maximal height so its outgoing pushes are legal
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        """Discharge a vertex: push excess to lower neighbours, relabel when stuck."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        """Push as much excess as residual capacity allows along one edge."""
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        """Lift a stuck vertex just above its lowest admissible neighbour."""
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    # Fix: all four bindings were collapsed onto `A_`, leaving graph/entrances/exits/
    # maximum_flow undefined where read below.
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f'''maximum flow is {maximum_flow}''')
| 391
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution of y = loc + scale * x for a base distribution of x.

    Restored from the collapsed rename: ``self.scale``/``self.loc`` were binding
    throwaway locals, the base class (``TransformedDistribution``, imported above)
    was an undefined name, and the class name is referenced by ``DistributionOutput``
    below.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        # identity transform by default
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project hidden states onto the (flattened) parameter tensors of a distribution
    family: one linear head per entry of ``args_dim``, then ``domain_map`` constrains
    each head's output to its parameter domain.

    Restored from the collapsed rename: attribute assignments were binding throwaway
    locals and the class name is referenced by ``DistributionOutput`` below.
    """

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one linear head per distribution parameter
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module so it can sit inside a model graph.

    Restored from the collapsed rename: the stored function was binding a throwaway
    local and the class name is referenced by ``DistributionOutput`` below.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class to construct a torch distribution from model-predicted parameters.

    Subclasses declare ``args_dim`` / ``distribution_class`` and implement
    ``domain_map``. Restored from the collapsed rename: class-attribute annotations
    and instance assignments were collapsed onto throwaway names; the methods below
    are referenced by name (``self._base_distribution``, ``cls.squareplus``,
    ``self.event_dim`` ...).
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # scale each parameter's size by the output dimension
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            # treat the extra dim as part of the event, not the batch
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        """Build the (optionally affinely rescaled) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event handled by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (length of event_shape)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the distribution's support, usable to pad invalid data."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution parameters."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        """Map raw projections into each parameter's domain; implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity map (x + sqrt(x^2 + 4)) / 2; keeps gradients alive for x << 0."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class A__ ( UpperCAmelCase__ ):
    """Student's t-distribution output head, parameterized by (df, loc, scale)."""

    # One raw feature per distribution argument.
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        """Constrain raw outputs to the StudentT domain: scale > 0 and df > 2."""
        # Positive scale, clamped away from zero for numerical stability.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        # df > 2 guarantees a finite variance.
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class A__ ( UpperCAmelCase__ ):
    """Normal (Gaussian) distribution output head, parameterized by (loc, scale)."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        """Constrain raw outputs to the Normal domain: scale > 0."""
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class A__ ( UpperCAmelCase__ ):
    """Negative-binomial output head, parameterized by (total_count, logits)."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        """Constrain raw outputs: total_count > 0; logits are unconstrained."""
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        """Build the distribution from (total_count, logits), keyword-passed because
        NegativeBinomial's positional order differs from our tuple order."""
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        """Build the output distribution; ``scale`` is folded into the logits."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 694
| 0
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A__ :
    """Mixin exercising a single diffusers UNet block (down/mid/up).

    Concrete test classes are expected to set ``block_class`` and ``block_type``.
    (Restored: the original clobbered every method onto one name and referenced
    an undefined ``__a`` throughout.)
    """

    @property
    def dummy_input(self):
        """Default dummy inputs for the block under test."""
        return self.get_dummy_input()

    @property
    def output_shape(self):
        """Expected output shape for each supported block type."""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a seeded dict of random inputs for the block's forward call."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {"""hidden_states""": hidden_states}
        if include_temb:
            temb_channels = 1_28
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            # Separate seed so residual states differ from hidden_states.
            res_generator = torch.manual_seed(1 )
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=res_generator , device=device ),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32) ).to(device )
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """Return (constructor kwargs, forward kwargs) appropriate for ``block_type``."""
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
            """temb_channels""": 1_28,
        }
        if self.block_type == "up":
            # Up blocks additionally consume the previous block's output channels.
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            # Mid blocks keep the channel count; they take no out_channels kwarg.
            init_dict.pop('out_channels' )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Forward pass in eval mode; check output shape and a corner slice."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , Tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5e-3 )

    @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
    def test_training(self):
        """Forward + backward in train mode: the loss must backpropagate cleanly."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , Tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
| 720
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
    """Holds default construction kwargs and input shapes for the
    LayoutLMv3 image-processor tests.

    (Restored: the original declared every ``__init__`` parameter with the same
    name — a SyntaxError — and never assigned the attributes read below.)
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size used by the processor when none is supplied.
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Kwargs for constructing the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for the LayoutLMv3 image processor (resizing + Tesseract OCR).

    (Restored: the original never assigned ``self.image_processor_tester`` and
    referenced undefined locals throughout.)
    """

    # Only exercise the processor when pytesseract is available.
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        # Helper providing default construction kwargs and input shapes.
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'apply_ocr' ) )

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        # Overrides passed to from_dict must take precedence.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )

    def test_batch_feature(self):
        # Covered elsewhere; intentionally a no-op for this processor.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # OCR output must accompany the pixel values.
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    def test_layoutlmv3_integration_test(self):
        """End-to-end check on a real document, with and without OCR."""
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
        expected_boxes = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
# Optional serving dependencies: the CLI degrades gracefully when FastAPI /
# uvicorn are absent, raising a helpful error only when `serve` is actually run.
# (Restored: the original bound all three sentinels to one clobbered name `A`,
# leaving `_serve_dependencies_installed`, `BaseModel` and `Body` undefined.)
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    # Fallbacks so the module still imports: pydantic models degrade to plain
    # objects and `Body(...)` defaults become no-ops.
    BaseModel = object

    def Body(*lowerCamelCase_ , **lowerCamelCase_a ):
        pass

    _serve_dependencies_installed = False
# Module logger; referenced as `logger` by ServeCommand.__init__ below.
logger = logging.get_logger("""transformers-cli/serving""")
def _a ( lowerCamelCase_ ):
    """Factory for the ``serve`` subcommand.

    Builds an inference pipeline from the parsed CLI namespace and returns the
    serving command wrapping it. (Restored: the original referenced an undefined
    ``args`` and discarded the pipeline it built.)
    """
    nlp = pipeline(
        task=lowerCamelCase_.task , model=lowerCamelCase_.model if lowerCamelCase_.model else None , config=lowerCamelCase_.config , tokenizer=lowerCamelCase_.tokenizer , device=lowerCamelCase_.device , )
    # `lowerCAmelCase_` resolves to the ServeCommand class defined below.
    return lowerCAmelCase_(nlp , lowerCamelCase_.host , lowerCamelCase_.port , lowerCamelCase_.workers )
class ServeModelInfoResult(BaseModel):
    """Response payload for `/`: expose the model's configuration."""

    infos: dict
class ServeTokenizeResult(BaseModel):
    """Response payload for `/tokenize`: tokens and (optionally) their ids."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
    """Response payload for `/detokenize`: the decoded text."""

    text: str
class ServeForwardResult(BaseModel):
    """Response payload for `/forward`: the raw pipeline output."""

    output: Any
class lowerCAmelCase_ ( BaseTransformersCLICommand ):
    """`transformers-cli serve`: expose a pipeline over a FastAPI REST app.

    (Restored: the original clobbered all methods onto one name, never set
    ``self._pipeline``/``self._app``/``self.host`` and referenced undefined
    locals.)
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser ):
        """Register the ``serve`` subcommand and its CLI flags on ``parser``."""
        serve_parser = parser.add_parser(
            '''serve''', help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''', type=str, choices=get_supported_tasks(), help='''The task to run the pipeline on''', )
        serve_parser.add_argument('''--host''', type=str, default='''localhost''', help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''', type=int, default=8_888, help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''', type=int, default=1, help='''Number of http workers''' )
        serve_parser.add_argument('''--model''', type=str, help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''', type=str, help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''', type=str, help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''', type=int, default=-1, help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''', )
        # `_a` is this module's serve-command factory defined above.
        serve_parser.set_defaults(func=_a )

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int ):
        """Keep the pipeline and bind the FastAPI routes (if deps are installed)."""
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                '''Using serve command requires FastAPI and uvicorn. '''
                '''Please install transformers with [serving]: pip install "transformers[serving]".'''
                '''Or install FastAPI and uvicorn separately.''' )
        else:
            logger.info(f'''Serving model over {host}:{port}''' )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '''/''', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['''GET'''], ),
                    APIRoute(
                        '''/tokenize''', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['''POST'''], ),
                    APIRoute(
                        '''/detokenize''', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['''POST'''], ),
                    APIRoute(
                        '''/forward''', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['''POST'''], ),
                ], timeout=600, )

    def run(self ):
        """Start the uvicorn server (blocking)."""
        run(self._app, host=self.host, port=self.port, workers=self.workers )

    def model_info(self ):
        """GET `/`: return the model configuration as a dict."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )

    def tokenize(self, text_input: str = Body(None, embed=True ), return_ids: bool = Body(False, embed=True ) ):
        """POST `/tokenize`: tokenize text, optionally also returning token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=500, detail={'''model''': '''''', '''error''': str(e )} )

    def detokenize(self, tokens_ids: List[int] = Body(None, embed=True ), skip_special_tokens: bool = Body(False, embed=True ), cleanup_tokenization_spaces: bool = Body(True, embed=True ), ):
        """POST `/detokenize`: decode token ids back to text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='''''', text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=500, detail={'''model''': '''''', '''error''': str(e )} )

    async def forward(self, inputs=Body(None, embed=True ) ):
        """POST `/forward`: run the pipeline on `inputs`; empty input short-circuits."""
        if len(inputs ) == 0:
            return ServeForwardResult(output=[], attention=[] )
        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(500, {'''error''': str(e )} )
| 349
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 349
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase_: List[Any] = logging.get_logger(__name__)
# Map of pretrained checkpoint identifiers to their hosted config-file URLs.
# NOTE(review): this rebinds `lowercase_`, clobbering the logger above —
# presumably these were two distinct names; confirm before relying on either.
lowercase_: Union[str, Any] = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class lowercase__ (__snake_case ):
    """Configuration for the LXMERT model: text encoder, vision/cross-modal layer
    counts, visual feature geometry, and pre-training task switches.

    (Restored: the original declared every ``__init__`` parameter with the same
    name ``__a`` — a SyntaxError — and referenced the real names in the body.)
    """

    model_type = 'lxmert'
    attribute_map = {}

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_attention_heads=1_2,
        num_qa_labels=9_5_0_0,
        num_object_labels=1_6_0_0,
        num_attr_labels=4_0_0,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_0_4_8,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        # Text-encoder hyperparameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Label spaces for the QA / object / attribute heads.
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        # Layer counts: language (l), cross-modality (x), relation/vision (r).
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        # Visual feature geometry and loss scaling.
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        # Pre-training task switches.
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs )
| 127
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase_: Optional[int] = logging.get_logger(__name__)
# Map of pretrained checkpoint identifiers to their hosted config-file URLs.
# NOTE(review): this rebinds `lowercase_`, clobbering the logger above —
# presumably these were two distinct names; confirm before relying on either.
lowercase_: Optional[int] = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class lowercase__ (__snake_case ):
    """Configuration for the EnCodec neural audio codec (encoder/decoder
    filter banks, residual vector quantizer, and chunking behaviour).

    (Restored: the original declared every ``__init__`` parameter with the same
    name ``__a`` — a SyntaxError — and the ``frame_rate`` property referenced an
    undefined ``hop_length``.)
    """

    model_type = 'encodec'

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=2_4_0_0_0,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=1_2_8,
        num_filters=3_2,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_0_2_4,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        # Audio I/O and chunking behaviour.
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        # Encoder/decoder architecture.
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        # Quantizer: codebook dim defaults to the model hidden size.
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
        super().__init__(**kwargs )

    @property
    def chunk_length(self ):
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride(self ):
        """Stride between chunks in samples (>= 1), or None when not chunking."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def frame_rate(self ):
        """Codec frames per second: sampling rate divided by the total hop length."""
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def num_quantizers(self ):
        """Number of RVQ codebooks needed for the largest target bandwidth."""
        return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
| 127
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCamelCase_ ( PipelineTool ):
    """Tool that answers a natural-language question about a document image using a Donut model."""

    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        """Refuse to construct the tool when Pillow (needed to handle images) is missing."""
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*args, **kwargs )

    def encode(self, document: "Image", question: str):
        """Build the Donut decoder prompt ids and the document pixel values."""
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy-decode the answer token sequence on the tool's device."""
        return self.model.generate(
            inputs['pixel_values'].to(self.device ),
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device ),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens and the task tag from the generation, then parse the answer."""
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '' )
        sequence = re.sub(r'<.*?>', '', sequence, count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 75
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or marked stale.
LABELS_TO_EXEMPT = [
    '''good first issue''',
    '''good second issue''',
    '''good difficult issue''',
    '''feature request''',
    '''new model''',
    '''wip''',
]
def main() -> None:
    """Close or mark stale inactive GitHub issues on huggingface/transformers.

    Requires a ``GITHUB_TOKEN`` environment variable with repo access.
    """
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )

    for issue in open_issues:
        # Most recent comment first.
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # The bot already warned 7+ days ago and nobody replied: close it.
            issue.edit(state='''closed''' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # First warning: post the stale notice.
            issue.create_comment(
                '''This issue has been automatically marked as stale because it has not had '''
                '''recent activity. If you think this still needs to be addressed '''
                '''please comment on this thread.\n\nPlease note that issues that do not follow the '''
                '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
                '''are likely to be ignored.''' )
# Entry point when run as a scheduled GitHub Action.
if __name__ == "__main__":
    main()
| 75
| 1
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
    """CPU-only regression test: an optimizer wrapped by `accelerator.prepare` must survive pickling."""

    def test_accelerated_optimizer_pickling(self) -> None:
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # Reset global accelerator state so other tests start clean.
        AcceleratorState._reset_state()
| 714
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

# pandas.read_csv parameters grouped by how they must be filtered before forwarding:
# no usable default value / deprecated / introduced in pandas 1.3 / introduced in pandas 2.0.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class CsvConfig(datasets.BuilderConfig ):
    """BuilderConfig for CSV: mirrors the `pandas.read_csv` keyword arguments."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: str = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter` and `column_names` are aliases that take precedence when provided.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Keyword arguments forwarded to `pandas.read_csv`, filtered by pandas version."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that streams CSV files through pandas."""

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators(self, dl_manager):
        """Handle str, list and dict shapes of `data_files` and yield split generators."""
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits

    def _cast_table(self, pa_table: pa.Table ) -> pa.Table:
        """Cast a pandas-produced Arrow table to the configured features schema."""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table

    def _generate_tables(self, files):
        """Yield ((file_idx, batch_idx), table) pairs, reading each CSV in chunks."""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
| 656
| 0
|
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    """A binary-tree node holding an integer payload and left/right children."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree level by level from stdin ('n' stops input)."""
    print('''\n********Press N to stop entering at any point of time********\n''' )
    check = input('''Enter the value of the root node: ''' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F"""Enter the left node of {node_found.data}: """
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F"""Enter the right node of {node_found.data}: """
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise  # unreachable: the loop always returns once input stops
def pre_order(node: TreeNode) -> None:
    """Print the tree in root-left-right order, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=''',''' )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node: TreeNode) -> None:
    """Print the tree in left-root-right order, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left )
    print(node.data, end=''',''' )
    in_order(node.right )
def post_order(node: TreeNode) -> None:
    """Print the tree in left-right-root order, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data, end=''',''' )
def level_order(node: TreeNode) -> None:
    """Print the tree breadth-first using a FIFO queue, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=''',''' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node: TreeNode) -> None:
    """Print the tree level by level, one line per level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        # Drain the current level, collecting the next level's nodes.
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=''',''' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=''',''' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data, end=''',''' )
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=''',''' )
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return `s` centered in a banner of `width` `char`s (bare rule when `s` is empty)."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2, 2 )
    return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt('''Binary Tree Traversals'''))

    # Build the tree interactively, then demonstrate every traversal on it.
    node = build_tree()
    print(prompt('''Pre Order Traversal'''))
    pre_order(node)
    print(prompt() + '''\n''')

    print(prompt('''In Order Traversal'''))
    in_order(node)
    print(prompt() + '''\n''')

    print(prompt('''Post Order Traversal'''))
    post_order(node)
    print(prompt() + '''\n''')

    print(prompt('''Level Order Traversal'''))
    level_order(node)
    print(prompt() + '''\n''')

    print(prompt('''Actual Level Order Traversal'''))
    level_order_actual(node)
    print('''*''' * 50 + '''\n''')

    print(prompt('''Pre Order Traversal - Iteration Version'''))
    pre_order_iter(node)
    print(prompt() + '''\n''')

    print(prompt('''In Order Traversal - Iteration Version'''))
    in_order_iter(node)
    print(prompt() + '''\n''')

    print(prompt('''Post Order Traversal - Iteration Version'''))
    post_order_iter(node)
    print(prompt())
| 90
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase_ ( PipelineTool ):
    """Tool that classifies an English text against caller-supplied labels via an NLI model."""

    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        """Load model/tokenizer, then locate the "entailment" class index in the model config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )

    def encode(self, text, labels):
        """Tokenize one (premise, hypothesis) pair per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f"This example is {label}" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )

    def decode(self, outputs):
        """Pick the label whose entailment logit is highest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
| 79
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase_ ( FeatureExtractionMixin):
    """Base feature extractor for sequential (speech) inputs.

    Stores `feature_size`, `sampling_rate` and `padding_value`, and implements
    generic batch padding/truncation that returns a `BatchFeature`.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        # Side on which sequences are padded and whether `pad` returns an
        # attention mask by default.
        self.padding_side = kwargs.pop('''padding_side''' , '''right''' )
        self.return_attention_mask = kwargs.pop('''return_attention_mask''' , True )

        super().__init__(**kwargs )

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of extracted features to uniform length."""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
                F''' to this method that includes {self.model_input_names[0]}, but you provided'''
                F''' {list(processed_features.keys() )}''' )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = '''tf'''
            elif is_torch_tensor(first_element ):
                return_tensors = '''pt'''
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = '''np'''
            else:
                raise ValueError(
                    F'''type of {first_element} unknown: {type(first_element )}. '''
                    '''Should be one of a python, numpy, pytorch or tensorflow object.''' )

        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )

        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # keep float features in float32 rather than float64
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )

        return BatchFeature(batch_outputs , tensor_type=return_tensors )

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad a single example (dict of 1D/2D arrays) up to `max_length`."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )

        if needs_to_be_padded:
            difference = max_length - len(required_input )

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features['''attention_mask'''] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , '''constant''' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features['''attention_mask'''] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , '''constant''' , constant_values=self.padding_value )
            else:
                raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """Truncate a single example down to `max_length` (no-op unless `truncation`)."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input ) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features['''attention_mask'''][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-facing `padding` argument into a PaddingStrategy and validate it."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
                ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )

        return padding_strategy
| 342
|
"""simple docstring"""
from timeit import timeit
# Mapping of sample strings to whether each one is a palindrome.
test_data = {
    """MALAYALAM""": True,
    """String""": False,
    """rotor""": True,
    """level""": True,
    """A""": True,
    """BB""": True,
    """ABC""": False,
    """amanaplanacanalpanama""": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Return True if `s` reads the same forwards and backwards (two-pointer scan)."""
    start_i = 0
    end_i = len(s ) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    """Return True if `s` is a palindrome, comparing each index with its mirror."""
    end = len(s ) // 2
    n = len(s )
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )
def is_palindrome_recursive(s: str) -> bool:
    """Return True if `s` is a palindrome, peeling matching end characters recursively.

    Bug fix: the base case must be `len(s) <= 1`; with `<= 2` every two-character
    string (e.g. "ab") was wrongly reported as a palindrome, contradicting the
    cross-implementation equality asserts in the __main__ block.
    """
    if len(s ) <= 1:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    """Return True if `s` equals its own reverse (slice idiom)."""
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    """Time `name`(key) across the whole test_data mapping and print the result."""
    stmt = F'''all({name}(key) is value for key, value in test_data.items())'''
    setup = F'''from __main__ import test_data, {name}'''
    number = 500_000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
    # Sanity-check that all implementations agree on every sample string.
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(F'''{key:21} {value}''')
    print("""a man a plan a canal panama""")

    # Benchmark each implementation (timings below from a reference run).
    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("""is_palindrome_slice""")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("""is_palindrome""")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("""is_palindrome_recursive""")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("""is_palindrome_traversal""")
| 342
| 1
|
from math import log
from scipy.constants import Boltzmann, physical_constants
_lowerCamelCase = 300 # TEMPERATURE (unit = K)
def SCREAMING_SNAKE_CASE__ ( donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
    """Return the built-in voltage (V) of a pn junction.

    Args are carrier concentrations (donor, acceptor, intrinsic); relies on the
    module-level temperature constant ``T``.
    Raises ValueError for non-positive inputs or when the doping concentrations
    do not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("""Donor concentration should be positive""" )
    elif acceptor_conc <= 0:
        raise ValueError("""Acceptor concentration should be positive""" )
    elif intrinsic_conc <= 0:
        raise ValueError("""Intrinsic concentration should be positive""" )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            """Donor concentration should be greater than intrinsic concentration""" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            """Acceptor concentration should be greater than intrinsic concentration""" )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 6
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case_ ( tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt" ):
    '''Tokenize a single text line to a fixed-length batch of size 1.

    Fixes: the original declared six parameters under one duplicated name (a
    SyntaxError) while the body referenced the real names; restored from the
    body and from the four-positional-argument call sites in this file.
    '''
    # BART needs add_prefix_space when the line does not already start with one
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer, BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding='''max_length''' if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def snake_case_ ( input_ids, pad_token_id, attention_mask=None, ):
    '''Remove columns of `input_ids` (and `attention_mask`, if given) that are
    populated exclusively by `pad_token_id`.

    Fixes: the original declared three parameters under one duplicated name
    (a SyntaxError); names restored from the body and from the
    `attention_mask=` keyword call site in this file.
    '''
    # keep a column iff at least one row holds a non-pad token there
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( _lowercase):
def __init__( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple="train" , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]="" , ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : Optional[int] = Path(__lowerCAmelCase ).joinpath(type_path + '''.source''' )
_lowerCamelCase : List[str] = Path(__lowerCAmelCase ).joinpath(type_path + '''.target''' )
_lowerCamelCase : List[Any] = self.get_char_lens(self.src_file )
_lowerCamelCase : Optional[int] = max_source_length
_lowerCamelCase : Optional[Any] = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
_lowerCamelCase : List[Any] = tokenizer
_lowerCamelCase : List[Any] = prefix
if n_obs is not None:
_lowerCamelCase : List[str] = self.src_lens[:n_obs]
_lowerCamelCase : int = src_lang
_lowerCamelCase : Union[str, Any] = tgt_lang
def __len__( self : int ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : Dict , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = index + 1 # linecache starts at 1
_lowerCamelCase : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip('''\n''' )
_lowerCamelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip('''\n''' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __lowerCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowerCamelCase : Optional[int] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
)
_lowerCamelCase : Union[str, Any] = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
_lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , '''right''' )
_lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , '''right''' )
_lowerCamelCase : Optional[Any] = source_inputs['''input_ids'''].squeeze()
_lowerCamelCase : Union[str, Any] = target_inputs['''input_ids'''].squeeze()
_lowerCamelCase : Any = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : str ):
"""simple docstring"""
return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = torch.stack([x['''input_ids'''] for x in batch] )
_lowerCamelCase : Tuple = torch.stack([x['''attention_mask'''] for x in batch] )
_lowerCamelCase : Union[str, Any] = torch.stack([x['''decoder_input_ids'''] for x in batch] )
_lowerCamelCase : Tuple = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __lowerCAmelCase )
else self.tokenizer.pad_token_id
)
_lowerCamelCase : Tuple = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __lowerCAmelCase )
else self.tokenizer.pad_token_id
)
_lowerCamelCase : Union[str, Any] = trim_batch(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : List[str] = trim_batch(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
lowerCAmelCase__ = getLogger(__name__)
def snake_case_ ( A_ : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(A_ ) )
def snake_case_ ( A_: str ):
    '''Collect repository metadata and dump it to `<A_>/git_log.json`.

    Bug fixed: the original passed the folder-path argument itself to
    `save_json`, writing the path string instead of the repo info it had just
    collected on the line above.
    '''
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(A_, '''git_log.json''' ) )
def snake_case_ ( content, path, indent=4, **json_dump_kwargs ):
    '''Serialize `content` as JSON to `path`.

    Fixes: the original declared all parameters (and **kwargs) under one
    duplicated name, a SyntaxError; names restored from the body's call.
    '''
    with open(path, '''w''' ) as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs )
def snake_case_ ( A_ : Any ):
'''simple docstring'''
with open(A_ ) as f:
return json.load(A_ )
def snake_case_ ( ):
    '''Return a dict describing the current git checkout (repo id, commit sha,
    branch) plus the host name.

    Fixes: the original passed an undefined name as `search_parent_directories`
    and stringified that same undefined name for `repo_id`; restored to search
    upward for the repo and record the repo object itself.
    '''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def snake_case_ ( fn, iterable ):
    '''Eagerly map `fn` over `iterable` (a list-returning `map`).

    Fixes: the original declared both parameters under one duplicated name,
    a SyntaxError.
    '''
    return list(map(fn, iterable ) )
def snake_case_ ( obj, path ):
    '''Pickle `obj` to the file at `path`.

    Fixes: the original declared both parameters under one duplicated name,
    a SyntaxError.
    '''
    with open(path, '''wb''' ) as f:
        return pickle.dump(obj, f )
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
def remove_articles(A_ : str ):
return re.sub(R'''\b(a|an|the)\b''', ''' ''', A_ )
def white_space_fix(A_ : Any ):
return " ".join(text.split() )
def remove_punc(A_ : List[Any] ):
_lowerCamelCase : Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(A_ : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A_ ) ) ) )
def snake_case_ ( prediction, ground_truth ):
    '''Token-level F1 between a predicted answer and a reference, after SQuAD
    normalization (standard SQuAD metric).

    Fixes: the original declared both parameters under one duplicated name,
    a SyntaxError. Relies on the sibling `normalize_answer` helper.
    '''
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    # multiset intersection of tokens
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def snake_case_ ( prediction, ground_truth ):
    '''True iff prediction and reference are identical after SQuAD
    normalization.

    Fixes: the original declared both parameters under one duplicated name,
    a SyntaxError.
    '''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def snake_case_ ( output_lns, reference_lns ):
    '''Average exact-match score over aligned prediction/reference lists.

    Fixes: the original declared both parameters under one duplicated name,
    a SyntaxError; restored names reflect the zip over the two lists.
    '''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns, reference_lns ):
        em += exact_match_score(hypo, pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def snake_case_ ( model_prefix ):
    '''True if `model_prefix` names a RAG model (prefix "rag").

    Fixes: the original's body referenced `model_prefix` while the parameter
    carried an obfuscated name, a guaranteed NameError.
    '''
    return model_prefix.startswith('''rag''' )
def snake_case_ ( extra_params, hparams, config ):
    '''Move each truthy attribute named in `extra_params` from `hparams` onto
    `config`, mapping `dropout` -> `dropout_rate` for T5-style configs.

    Fixes: the original declared three parameters under one duplicated name
    (a SyntaxError) and the `dropout` mapping was flattened into a bare
    assignment, so the equivalence table was never populated.
    Returns the (mutated) `hparams` and `config`.
    '''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams, p, None ):
            if not hasattr(config, p ) and not hasattr(config, equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams, p )
                continue
            set_p = p if hasattr(config, p ) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p ) )
            delattr(hparams, p )
    return hparams, config
| 83
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import scaffolding for the TrOCR sub-package.
# Fixes: the structure dict and the optional modeling list were bound to
# obfuscated names, so `_import_structure` was undefined at the bottom, and
# the lazy module was assigned to a throwaway variable instead of being
# installed into `sys.modules`.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling objects are only importable when torch is installed
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # replace this module with a lazy proxy that imports submodules on demand
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 106
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Module logger plus the config classes / model-type strings that support QA.
# Fixes: the original bound all three values to the same obfuscated name, so
# the generator on the last line referenced an undefined
# `MODEL_CONFIG_CLASSES`.
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __magic_name__ :
    # Training/feature-extraction arguments for the SQuAD task.
    # NOTE(review): every field below is annotated under the same obfuscated
    # name `A`, so effectively only the last survives, and the
    # `__lowerCAmelCase` defaults are undefined at runtime. Judging from the
    # metadata strings the intended fields are: model_type, data_dir,
    # max_seq_length, doc_stride, max_query_length, max_answer_length,
    # overwrite_cache, version_2_with_negative, null_score_diff_threshold,
    # n_best_size, lang_id, threads — confirm against the upstream
    # SquadDataTrainingArguments before relying on this class.

    # model type, chosen from the QA model mapping
    A: str = field(
        default=__lowerCAmelCase , metadata={"help": "Model type selected in the list: " + ", ".join(__lowerCAmelCase)})
    # directory containing the SQuAD .json files
    A: str = field(
        default=__lowerCAmelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    A: int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    A: int = field(
        default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    A: int = field(
        default=6_4 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    A: int = field(
        default=3_0 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    A: bool = field(
        default=__lowerCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
    A: bool = field(
        default=__lowerCAmelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    A: float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    A: int = field(
        default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    A: int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    A: int = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class __magic_name__ ( __lowerCAmelCase):
    # NOTE(review): both members are bound to the same name `A`, so only the
    # second assignment survives, and the base class name is undefined.
    # This looks like the standard `Split(Enum)` with members
    # train = "train" / dev = "dev" — confirm against upstream before use.
    A: Union[str, Any] = "train"
    A: List[str] = "dev"
class __magic_name__ ( Dataset):
    """Map-style SQuAD dataset: loads (or builds and caches) tokenized features
    and yields per-example tensor dicts.

    Fixes: the original constructor declared seven parameters under one
    duplicated name (a SyntaxError) and used `torch.intaa` (an obfuscated
    `torch.int64`). Names are restored from the body; the base class follows
    the `torch.utils.data.Dataset` import.
    NOTE(review): the import above brings in `SquadVaProcessor` twice where
    upstream distinguishes SquadV2Processor / SquadV1Processor — both branches
    below therefore use the single imported name; confirm.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode, str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = '''v2''' if args.version_2_with_negative else '''v1'''
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['''features''']
                self.dataset = self.old_features.get('''dataset''', None )
                self.examples = self.old_features.get('''examples''', None )
                logger.info(
                    F"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        ''' future run''' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )

    def __len__(self) -> int:
        return len(self.features )

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert the cached feature to tensors for this one example.
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float )

        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }

        # these model families do not use token type ids
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({'''is_impossible''': is_impossible} )
            if self.is_language_sensitive:
                inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long )
            end_positions = torch.tensor(feature.end_position, dtype=torch.long )
            inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )

        return inputs
| 106
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
class UpperCAmelCase ( PretrainedConfig ):
    '''Configuration for a timm-backed backbone.

    Fixes: the original `__init__` declared all parameters under one
    duplicated obfuscated name (a SyntaxError) and assigned every value to a
    local variable instead of `self.*`, so no attribute was ever set. The
    base class follows this module's `PretrainedConfig` import.
    NOTE(review): upstream names the class attribute `model_type`; the
    obfuscated `SCREAMING_SNAKE_CASE_` is kept here to avoid a further
    interface change — confirm.
    '''

    SCREAMING_SNAKE_CASE_ = 'timm_backbone'

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        # default: expose only the last feature map
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 42
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> float:
lowerCamelCase_ = x
lowerCamelCase_ = y
for step in range(__UpperCamelCase ): # noqa: B007
lowerCamelCase_ = a * a - b * b + x
lowerCamelCase_ = 2 * a * b + y
lowerCamelCase_ = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def _UpperCamelCase ( __UpperCamelCase ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__UpperCamelCase ,1 ,1 ) )
def _UpperCamelCase (
    image_width: int = 8_00,
    image_height: int = 6_00,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    '''Render the Mandelbrot set into a PIL image.

    Fixes: the original declared all seven parameters under one duplicated
    name (a SyntaxError) and re-bound every local to the same obfuscated name.
    The aspect-derived figure height is loop-invariant, so it is hoisted out
    of the per-pixel loops.
    NOTE(review): calls `get_distance` / `get_color_coded_rgb` /
    `get_black_and_white_rgb` as the original body does; those names are not
    defined under those identifiers in this file — confirm the wiring.
    '''
    img = Image.new('RGB', (image_width, image_height) )
    pixels = img.load()
    # figure height preserves the image's aspect ratio (loop-invariant)
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    # NOTE(review): `get_image` is not defined under that name in this file
    # (the renderer above is bound to an obfuscated name) — confirm wiring.
    A_ = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    # NOTE(review): `img` is undefined here — the image above was bound to
    # `A_`; confirm.
    img.show()
| 42
| 1
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__lowerCAmelCase : Dict = datasets.utils.logging.get_logger(__name__)
class A ( folder_based_builder.FolderBasedBuilderConfig ):
    # Builder config for the audio folder loader.
    # NOTE(review): both fields share the obfuscated name `a_`, so only the
    # second survives; upstream this config presumably declares
    # `drop_labels` and `drop_metadata` (both Optional[bool] = None) — confirm.
    a_ = None
    a_ = None
class A ( folder_based_builder.FolderBasedBuilder ):
    # Folder-based builder specialized for audio files.
    # NOTE(review): every attribute below is bound to the same obfuscated name
    # `a_`, so only the last assignment survives, and `AudioFolderConfig` is
    # not defined under that name in this file. Upstream these correspond to
    # BASE_FEATURE, BASE_COLUMN_NAME, BUILDER_CONFIG_CLASS, EXTENSIONS and
    # CLASSIFICATION_TASK — confirm before relying on them.
    a_ = datasets.Audio()
    a_ = '''audio'''
    a_ = AudioFolderConfig
    a_ = 4_2 # definition at the bottom of the script
    a_ = AudioClassification(audio_column='''audio''' , label_column='''label''' )
# Dot-prefixed, lowercase file extensions accepted by the audio folder loader.
__lowerCAmelCase : int = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
# NOTE(review): `AUDIO_EXTENSIONS` is not defined under that name in this file
# — the list above is bound to an obfuscated name; confirm the wiring.
__lowerCAmelCase : str = AUDIO_EXTENSIONS
| 708
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Release-script constants: examples dir, per-file version regexes, target
# files and the README path.
# NOTE(review): all four constants are bound to the same obfuscated name, so
# only the last survives, and the functions below reference the upstream
# names (REPLACE_PATTERNS, REPLACE_FILES, README_FILE) — confirm wiring.
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( fname, version, pattern ):
    """Rewrite the version string inside one file using the compiled regex
    and replacement template registered for `pattern`.

    Fixes: the original declared three parameters under one duplicated name
    (a SyntaxError) and collapsed the pattern/replacement unpacking and the
    substituted text into one obfuscated local.
    """
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
    """Walk a tree of example scripts and update their pinned versions.

    NOTE(review): obfuscation collapsed distinct values into
    `UpperCamelCase__` — upstream walks the examples-directory constant and
    passes the *version* through, and joins (folder, fname) for each file;
    here the single argument is used as walk root, join components and
    version, and `update_version_in_file` is not defined under that name in
    this file. Reconcile against the upstream release script before use.
    """
    for folder, directories, fnames in os.walk(UpperCamelCase__ ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( version, patch=False ):
    """Update the version in every registered file; skip the example scripts
    for patch releases.

    Fixes: the original declared both parameters under one duplicated name
    (a SyntaxError); names restored from the body and from the `patch=`
    keyword call site in this file.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def lowerCAmelCase ( ):
    """Strip `/main/` from the model-doc links inside the README's model list.

    Fixes: the original opened and wrote an undefined obfuscated name where
    the README path belongs and never bound the lines/index locals.
    NOTE(review): `README_FILE` matches the upstream constant; in this file
    the path is bound to an obfuscated name — confirm the wiring.
    """
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def lowerCAmelCase ( ):
    """Read the current package version out of the `__init__` file and parse it."""
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        init_code = f.read()
    raw_version = REPLACE_PATTERNS['''init'''][0].search(init_code ).groups()[0]
    return packaging.version.parse(raw_version )
def lowerCAmelCase ( patch=False ):
    """Interactively bump the package version for a (patch) release, then
    propagate it everywhere and optionally clean the README model list.

    Bug fixed: the original passed the `patch` flag where the chosen version
    string belongs (`global_version_update(<patch>, patch=<patch>)`); the
    parameter name follows the `patch=` keyword call site in this file.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
    """Interactively move the package to the next dev version after a release.

    Fixes: the original bound every local to the same obfuscated name, so
    `current_version` was undefined when its base version was read.
    """
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI entry point: `--post_release` bumps to the next dev version,
    # `--patch` marks a patch release.
    # NOTE(review): the parser and parsed args are bound to an obfuscated
    # name while the following lines reference `parser`/`args`, and the
    # release helpers are not defined under these names in this file —
    # confirm the wiring.
    __lowerCAmelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    __lowerCAmelCase : Tuple = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 654
| 0
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__A =logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
    """Deprecated alias kept for backward compatibility.

    Fixes: the original declared `*lowercase, **lowercase` (a duplicate
    argument name, i.e. a SyntaxError) and passed the positional-args tuple
    as the warning category; the category is restored to FutureWarning.
    NOTE(review): the base-class reference is obfuscated; per the import
    above it should be DonutImageProcessor — confirm.
    """

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 463
|
from __future__ import annotations
import math
def lowerCamelCase_ ( lowerCamelCase__ ):
    """Primality test using 6k +/- 1 trial division.

    Handles 2 and 3 directly, rejects everything below 2 and all multiples
    of 2 or 3, then tries divisors of the form 6k-1 / 6k+1 up to sqrt(n).
    """
    if 1 < lowerCamelCase__ < 4:
        # 2 and 3 are primes
        return True
    if lowerCamelCase__ < 2 or lowerCamelCase__ % 2 == 0 or lowerCamelCase__ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    divisor = 5
    while divisor * divisor <= lowerCamelCase__:
        if lowerCamelCase__ % divisor == 0 or lowerCamelCase__ % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
__A =[num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def lowerCamelCase_ ( lowerCamelCase__ ):
    """Return the first `n` odd composites that cannot be written as
    prime + 2*square (Project Euler 46, Goldbach's other conjecture).

    Fixes: the original validated with `isinstance(x, x)` (always a
    TypeError) and compared lengths against an undefined `n`.
    NOTE(review): relies on module globals `odd_composites` / `is_prime`,
    which are bound to obfuscated names in this file — confirm wiring.

    :raises ValueError: if the argument is not a positive integer.
    """
    if not isinstance(lowerCamelCase__ , int ):
        raise ValueError("n must be an integer" )
    if lowerCamelCase__ <= 0:
        raise ValueError("n must be >= 0" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        # try every decomposition odd_composite = prime + 2*i*i
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest ):
                break
            i += 1
        else:
            # no decomposition found: this composite violates the conjecture
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == lowerCamelCase__:
            return list_nums
    return []
def lowerCamelCase_ ( ):
    """Project Euler 46 answer: the smallest odd composite that cannot be
    written as prime + 2*square."""
    violations = compute_nums(1 )
    return violations[0]
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file —
    # the function above is bound to an obfuscated name; confirm the wiring.
    print(F"""{solution() = }""")
| 463
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
lowerCAmelCase__ : Any = TypeVar("""T""")
T = TypeVar("T")  # local re-declaration: the module binds TypeVar("T") to an obfuscated name


class a ( Generic[T] ):
    """Node of a disjoint-set forest: payload, parent pointer (initially
    itself, i.e. a singleton root) and a union-by-rank rank.

    Fixes: the original `__init__` assigned every value to the same
    obfuscated local instead of `self.*`, so no attribute was ever set.
    """

    def __init__( self , data: T ) -> None:
        self.data = data
        # every node starts as the root of its own one-element set
        self.parent = self
        self.rank = 0
T = TypeVar("T")  # local re-declaration: the module binds TypeVar("T") to an obfuscated name


class a ( Generic[T] ):
    """Disjoint-set (union-find) forest keyed by payload value, with path
    compression and union by rank.

    Fixes: every method of the original shared one obfuscated name and all
    attribute writes were collapsed into throwaway locals. Method names are
    restored from this file's own caller (`make_set` / `find_set` / `union`
    in the Kruskal implementation below).
    NOTE(review): `make_set` instantiates `DisjointSetTreeNode`, as the
    original body does; that class is bound to an obfuscated name in this
    file — confirm the wiring.
    """

    def __init__( self ) -> None:
        # payload -> its tree node
        self.map = {}

    def make_set( self , data: T ) -> None:
        """Create a new singleton set containing `data`."""
        self.map[data] = DisjointSetTreeNode(data )

    def find_set( self , data: T ):
        """Return the root node of the set containing `data` (compresses paths)."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def link( self , node1 , node2 ) -> None:
        """Attach the lower-rank root under the higher-rank one (union helper)."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union( self , data1: T , data2: T ) -> None:
        """Merge the sets containing `data1` and `data2`."""
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )
T = TypeVar("T")  # local re-declaration: the module binds TypeVar("T") to an obfuscated name


class a ( Generic[T] ):
    """Undirected weighted graph stored as a nested adjacency dict;
    `kruskal()` builds a minimum spanning tree.

    Fixes: the original's attribute writes were collapsed into throwaway
    locals (edges were never stored), its sort lambda referenced an
    undefined `x`, and all methods shared one obfuscated name. Method names
    are restored from the body's own calls (`self.add_node`,
    `graph.add_edge`).
    NOTE(review): `kruskal` instantiates `DisjointSetTree` and
    `GraphUndirectedWeighted` as the original body does; both are bound to
    obfuscated names in this file — confirm the wiring.
    """

    def __init__( self ) -> None:
        # node -> {neighbour: weight}
        self.connections = {}

    def add_node( self , node: T ) -> None:
        """Register `node` if it has not been seen yet."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge( self , node1: T , node2: T , weight: int ) -> None:
        """Add an undirected edge of the given weight (both directions)."""
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal( self ):
        """Return the minimum spanning tree (as a new graph) via Kruskal's
        algorithm: sort edges by weight, add each edge whose endpoints lie in
        different disjoint sets."""
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    # record the reverse pair so each undirected edge appears once
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x: x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
| 711
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _a ( __lowerCAmelCase : Dict ):
    """Binarize the mask scores of a fine-pruned model and save a dense
    ``pytorch_model.bin`` containing the effectively pruned weights.

    Parameters
    ----------
    __lowerCAmelCase:
        Parsed CLI namespace with ``pruning_method``, ``threshold``,
        ``model_name_or_path`` and ``target_model_path`` attributes.

    BUG FIX (restoration): the body referenced an undefined ``args`` and every
    local was mangled to a meaningless shadowed name; locals are restored from
    the intact call expressions.
    """
    args = __lowerCAmelCase  # readable name for the parsed CLI namespace
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
# CLI entry point: parse pruning options and run the binarization routine.
# BUG FIX (restoration): the parser/namespace were assigned to mangled names
# while the code used `parser`/`args`, and the script called a nonexistent
# `main` — the function defined above is `_a`.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()
    _a(args)
| 502
| 0
|
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle for the given number of rows.

    BUG FIX (restoration): the def was mangled to `lowercase` (colliding with
    four sibling functions) and the body used undefined `num_rows`/`triangle`;
    the conventional name and locals are restored.
    """
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list:
    """Build Pascal's triangle row by row.

    >>> generate_pascal_triangle(3)
    [[1], [1, 1], [1, 2, 1]]

    Raises TypeError for non-int input and ValueError for negative input.
    BUG FIX (restoration): the def was mangled to `lowercase` while callers
    (including the helper chain) use `generate_pascal_triangle`, and the body
    referenced undefined `num_rows`/`triangle`.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list, current_row_idx: int) -> list:
    """Build row `current_row_idx` of Pascal's triangle from the rows built so far.

    BUG FIX (restoration): the def was mangled to `lowercase` while the caller
    uses `populate_current_row`, and the first/last-element assignment was
    mangled into a meaningless local binding.
    """
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list,
    current_row: list,
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set ``current_row[current_col_idx]`` to the sum of the two elements above it.

    BUG FIX (restoration): the final assignment was mangled into a local
    binding, so the computed value was discarded; it now writes into
    `current_row` as the caller expects.
    """
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list:
    """Build Pascal's triangle computing only half of each row (rows are symmetric).

    >>> generate_pascal_triangle_optimized(3)
    [[1], [1, 1], [1, 2, 1]]

    Raises TypeError for non-int input and ValueError for negative input.
    BUG FIX (restoration): the def was mangled to `lowercase` and every local
    (`result`, `temp_row`, `distinct_elements`, the half-rows) was mangled to
    an undefined name.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so each element is a pairwise sum.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        # Mirror the first half (dropping the middle element for odd rows).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Time both triangle generators over a range of sizes and print the results.

    BUG FIX (restoration): the def was mangled to `lowercase` while the
    ``__main__`` guard calls `benchmark()`, and the inner helper's locals and
    arguments were mangled to undefined/duplicate names.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
# Script entry point: run the module doctests, then time both generators.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 29
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module-level logger. BUG FIX: it was bound to a mangled name while the
# class body below calls `logger.warning(...)`; the old binding is kept as an alias.
logger = logging.get_logger(__name__)
UpperCAmelCase : Dict = logger
class __lowerCAmelCase ( SequenceFeatureExtractor ):
    """
    SpeechT5-style feature extractor: turns raw speech into (optionally
    normalized) float ``input_values`` and/or log-mel spectrogram targets.

    BUG FIX (restoration): every signature in the original repeated one
    mangled parameter name (a SyntaxError), the base class name was undefined,
    and bodies referenced undefined locals. Names are restored from the call
    expressions and default values, which were left intact; the base class is
    the ``SequenceFeatureExtractor`` imported at the top of the file.
    """

    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7_600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        # Window/hop sizes in samples (the CLI-style values are milliseconds).
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value: float = 0.0):
        """Normalize each utterance to zero mean / unit variance over its real length."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    # Padded tail keeps the configured padding value.
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        """Return the (frames, num_mel_bins) log-mel spectrogram of one waveform."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize `audio` (model inputs) and/or `audio_target` (mel labels)."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
            if inputs is None:
                return inputs_target
            else:
                # Attach the target spectrograms as labels of the input batch.
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target: bool = False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        """Shared featurization path for inputs (`is_target=False`) and targets."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the config, dropping attributes derived from the others."""
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
| 563
| 0
|
from __future__ import annotations
# Sentinel character marking the end of a complete word inside the trie.
# BUG FIX: it was bound to a mangled name while the class below compares
# against `END`; the old binding is kept as an alias.
END = "#"
UpperCamelCase = END
class __UpperCAmelCase :
    """Prefix tree backed by nested dicts; a key equal to the module-level
    sentinel ``END`` marks the end of an inserted word.

    BUG FIX (restoration): all three methods shared one mangled name (only
    the last survived) and bodies referenced undefined locals; method names
    are restored to the ones the module-level code calls
    (`insert_word`, `find_word`, `_elements`).
    """

    def __init__(self) -> None:
        # Root node of the nested-dict trie.
        self._trie = {}

    def insert_word(self, text: str) -> None:
        """Insert `text` into the trie, character by character."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        """Return every suffix completing `prefix`, or [] if the prefix is absent."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        """Depth-first collection of all suffix strings below node `d`."""
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


# Readable alias: the module-level code below instantiates `Trie()`.
Trie = __UpperCAmelCase
# Module-level trie pre-populated with the demo vocabulary.
# BUG FIX (restoration): the instance and the word tuple were assigned to
# mangled names while the loop (and the function below) use `trie`/`words`.
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    """Return `string` joined with every suffix the module trie knows for it.

    BUG FIX (restoration): the def was mangled while the caller uses
    `autocomplete_using_trie`, and the body mixed the mangled parameter with
    the undefined names `string`/`suffixes`.
    """
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    """Demo: print the completions for the prefix ``"de"``.

    BUG FIX (restoration): the def was mangled while the ``__main__`` guard
    below calls `main()`.
    """
    print(autocomplete_using_trie("de"))
# Script entry point: run the module doctests, then demo the autocompleter.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 716
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __UpperCAmelCase (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    """Formats Arrow tables as (nested) ``torch.Tensor`` objects.

    BUG FIX (restoration): ``__init__`` declared the same mangled name as both
    a keyword and a ``**`` parameter (a SyntaxError), bodies referenced
    undefined locals, and the formatting entry points were mangled away from
    the names the ``TensorFormatter`` contract dispatches on
    (`format_row`, `format_column`, `format_batch`).
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors; otherwise return as-is."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a tensor, choosing a default torch dtype."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize an arbitrarily nested structure (dicts preserved, lists consolidated)."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 569
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): every test method below shares the mangled name
    # `UpperCamelCase_`, so only the last definition survives at runtime, and
    # bodies reference `_lowerCAmelCase` / `tokenizer`, which are never
    # defined — the original local variables were mangled away. Code kept
    # byte-identical; flagging only.
    def UpperCamelCase_ ( self ):
        # A mock response for an HTTP head request to emulate server down
        lowerCamelCase__ = mock.Mock()
        lowerCamelCase__ = 5_00
        lowerCamelCase__ = {}
        lowerCamelCase__ = HTTPError
        lowerCamelCase__ = {}
        # Download this model to make sure it's in the cache.
        lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" ,return_value=_lowerCAmelCase ) as mock_head:
            lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def UpperCamelCase_ ( self ):
        # A mock response for an HTTP head request to emulate server down
        lowerCamelCase__ = mock.Mock()
        lowerCamelCase__ = 5_00
        lowerCamelCase__ = {}
        lowerCamelCase__ = HTTPError
        lowerCamelCase__ = {}
        # Download this model to make sure it's in the cache.
        lowerCamelCase__ = GPTaTokenizerFast.from_pretrained("""gpt2""" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" ,return_value=_lowerCAmelCase ) as mock_head:
            lowerCamelCase__ = GPTaTokenizerFast.from_pretrained("""gpt2""" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase_ ( self ):
        # This test is for deprecated behavior and can be removed in v5
        try:
            lowerCamelCase__ = tempfile.mktemp()
            with open(_lowerCAmelCase ,"""wb""" ) as f:
                http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,_lowerCAmelCase )
            lowerCamelCase__ = AlbertTokenizer.from_pretrained(_lowerCAmelCase )
        finally:
            os.remove(_lowerCAmelCase )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("""tokenizer.json""" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("""tokenizer.json""" ,"""wb""" ) as f:
                http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,_lowerCAmelCase )
            lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size ,10_00 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("""tokenizer.json""" )

    def UpperCamelCase_ ( self ):
        # This test is for deprecated behavior and can be removed in v5
        lowerCamelCase__ = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): the vocab list below is presumably `vocab_tokens` —
    # methods read `self.vocab_tokens`, which this mangled attribute name
    # never defines. All test methods share one mangled name, and bodies
    # reference the undefined `_lowerCAmelCase` (mangled temp-dir/vocab-file
    # locals) and `tokenizer`. Code kept byte-identical; flagging only.
    _UpperCamelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def UpperCamelCase_ ( cls ):
        lowerCamelCase__ = TOKEN
        HfFolder.save_token(_lowerCAmelCase )

    @classmethod
    def UpperCamelCase_ ( cls ):
        # Best-effort cleanup of the hub repos created by the tests below.
        try:
            delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
        except HTTPError:
            pass

    def UpperCamelCase_ ( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""vocab.txt""" )
            with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            lowerCamelCase__ = BertTokenizer(_lowerCAmelCase )
        tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
        lowerCamelCase__ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(_lowerCAmelCase ,repo_id="""test-tokenizer""" ,push_to_hub=_lowerCAmelCase ,use_auth_token=self._token )
        lowerCamelCase__ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    def UpperCamelCase_ ( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""vocab.txt""" )
            with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            lowerCamelCase__ = BertTokenizer(_lowerCAmelCase )
        tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
        lowerCamelCase__ = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                _lowerCAmelCase ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=_lowerCAmelCase ,use_auth_token=self._token )
        lowerCamelCase__ = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
        self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )

    @require_tokenizers
    def UpperCamelCase_ ( self ):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""vocab.txt""" )
            with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            lowerCamelCase__ = CustomTokenizer(_lowerCAmelCase )
        # No fast custom tokenizer
        tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
        lowerCamelCase__ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=_lowerCAmelCase )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""vocab.txt""" )
            with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            lowerCamelCase__ = BertTokenizerFast.from_pretrained(_lowerCAmelCase )
            bert_tokenizer.save_pretrained(_lowerCAmelCase )
            lowerCamelCase__ = CustomTokenizerFast.from_pretrained(_lowerCAmelCase )
        tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
        lowerCamelCase__ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=_lowerCAmelCase )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
        lowerCamelCase__ = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''' ,use_fast=_lowerCAmelCase ,trust_remote_code=_lowerCAmelCase )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): each method builds a Trie into a mangled local
    # (`lowerCamelCase__`) but then operates on the undefined name `trie` —
    # the original local variable was mangled away. All methods also share
    # one mangled name, so only the last survives at runtime. Code kept
    # byte-identical; flagging only.
    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = Trie()
        trie.add("""Hello 友達""" )
        self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
        trie.add("""Hello""" )
        trie.data
        self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = Trie()
        self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
        trie.add("""[CLS]""" )
        trie.add("""extra_id_1""" )
        trie.add("""extra_id_100""" )
        self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = Trie()
        trie.add("""A""" )
        self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
        self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = Trie()
        trie.add("""TOKEN]""" )
        trie.add("""[SPECIAL_TOKEN]""" )
        self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = Trie()
        trie.add("""A""" )
        trie.add("""P""" )
        trie.add("""[SPECIAL_TOKEN]""" )
        self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = Trie()
        trie.add("""AB""" )
        trie.add("""B""" )
        trie.add("""C""" )
        self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = Trie()
        trie.add("""ABC""" )
        trie.add("""B""" )
        trie.add("""CD""" )
        self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )

    def UpperCamelCase_ ( self ):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        lowerCamelCase__ = Trie()
        lowerCamelCase__ = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
        self.assertEqual(_lowerCAmelCase ,["""AB""", """C"""] )
| 50
|
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_snake_case = logging.getLogger(__name__)
_snake_case = "Hello world! cécé herlolip"
_snake_case = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path, pytorch_dump_folder_path):
    """Convert an official BertAbs checkpoint into the re-implemented BertAbsSummarizer.

    Loads the authors' checkpoint from `path`, copies the weights into the new
    model, checks that both stacks produce identical outputs, then saves the
    new model's state dict. NOTE(review): like the upstream script, the save
    path is hard-coded and `pytorch_dump_folder_path` is not used.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


# Backward-compatible alias for the obfuscated historical name.
snake_case = convert_bertabs_checkpoints
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_snake_case = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 510
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Markov chain over a directed graph whose edges carry transition probabilities."""

    def __init__(self):
        # Maps node -> {destination: probability}.
        self.connections = {}

    def add_node(self, node: str) -> None:
        """Register `node` with an empty set of outgoing transitions."""
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        """Add a transition node1 -> node2 with the given probability (creates nodes lazily)."""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        """Return all known node names."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample a destination from `node` according to the transition probabilities."""
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        # Falls through if probabilities sum to < 1 and the draw exceeds them all.
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int):
    """Run a random walk of `steps` transitions from `start`; return visit counts."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    # Start every node at a count of 1 (Counter over the node list).
    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 464
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` images matching `class_prompt` via the LAION knn service.

    Saves images under `{class_data_dir}/images` plus caption/url/path index files.
    Grows the query size by 1.5x until enough candidates are returned.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Keep widening the query until enough candidates come back (capped at 10k).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate the payload is a decodable image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any broken/unreachable candidate.
                continue
    return
def parse_args():
    """Parse the CLI arguments for the class-image retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 464
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
# Type variable shared by the generic queue/graph classes below.
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    """Index of the parent of heap slot `position` in an array-backed binary heap."""
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    """Index of the left child of heap slot `position`."""
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    """Index of the right child of heap slot `position`."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Binary min-heap priority queue with O(log n) push / extract_min / update_key.

    `position_map` tracks each element's index inside `heap` so that
    `update_key` can locate an entry in O(1). Heap index math is done by the
    private static helpers so the class is self-contained.
    """

    def __init__(self):
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self):
        return self.elements

    def __repr__(self):
        return str(self.heap)

    @staticmethod
    def _parent(position):
        # Parent index in the array-backed heap.
        return (position - 1) // 2

    @staticmethod
    def _left_child(position):
        return (2 * position) + 1

    @staticmethod
    def _right_child(position):
        return (2 * position) + 2

    def is_empty(self):
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem, weight):
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self):
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem, weight):
        # Update the weight of the given key and restore the heap invariant
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = self._parent(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem):
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = self._parent(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem):
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = self._left_child(curr_pos)
        child_right_position = self._right_child(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos, node2_pos):
        # Swap the nodes at the given positions and keep position_map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map node -> {neighbour: weight}."""

    def __init__(self):
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self):
        return str(self.connections)

    def __len__(self):
        return self.nodes

    def add_node(self, node):
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1, node2, weight):
        # Add an edge between 2 nodes in the graph (both directions, same weight)
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph):
    """Prim's minimum spanning tree over a GraphUndirectedWeighted.

    Returns (dist, parent): for each node, the weight of the edge connecting it
    to the MST and its parent node in the tree (None for the root).
    """
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}

    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization: pick an arbitrary root and relax its neighbours
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 576
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of array[left:right]; return the index of `target` or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted `array`; return target's index or -1."""
    # Below this window size a plain linear scan is used. Defined locally because
    # the module-level `precision` constant was mangled by obfuscation.
    precision = 10
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of array[left..right]; return target's index or -1."""
    # Below this window size a plain linear scan is used. Defined locally because
    # the module-level `precision` constant was mangled by obfuscation.
    precision = 10
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by comma:\n''').strip()
__magic_name__ = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
__magic_name__ = int(input('''Enter the number to be found in the list:\n''').strip())
__magic_name__ = ite_ternary_search(collection, target)
__magic_name__ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
| 576
| 1
|
from math import isqrt, loga
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below `max_number` (sieve of Eratosthenes)."""
    from math import isqrt  # local import: the module-level math import line is broken

    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Project Euler 800: count hybrid-integers p**q * q**p (p < q prime) <= base**degree.

    Compares q*log2(p) + p*log2(q) against degree*log2(base) to avoid big-int powers,
    then counts valid (p, q) pairs with a two-pointer sweep over the prime list.
    """
    from math import log2  # local import: the module-level math import line is broken

    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink the right pointer until the pair (left, right) fits the bound.
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F'{solution() = }')
| 675
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from canonical checkpoint name to the hosted config.json URL.
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """Configuration class for BERT-style models (vocab, layer sizes, dropouts, ...)."""

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the dynamic axes of BERT's inputs."""

    @property
    def inputs(self):
        # Multiple-choice tasks carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 675
| 1
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Whisper-style feature extractor: pads raw audio and computes log-mel spectrograms."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        # A "chunk" is chunk_length seconds of audio at the given sampling rate.
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform):
        """Compute the log-mel spectrogram of `waveform`, clamped and scaled to ~[-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        # Clamp the dynamic range to 8 dB below the peak, then rescale.
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize each sequence to zero mean / unit variance over its unpadded length."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ):
        """Pad `raw_speech` to the model's chunk size and extract log-mel input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        """Serialize to a plain dict, dropping the (large, derivable) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output


# Backward-compatible alias for the obfuscated historical name.
__A = WhisperFeatureExtractor
| 96
|
"""simple docstring"""
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of `graph`, or report a cycle.

    `graph` maps vertex index -> list of successor indices (vertices 0..len-1).
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with all zero-indegree vertices.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    # If not every vertex was emitted, at least one cycle blocked the order.
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 96
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _lowercase(unittest.TestCase):
    """Tests for BlipProcessor: save/load round-tripping and its wiring of the
    tokenizer and image processor.

    NOTE(review): every test method is named ``lowerCamelCase_``, so each
    definition shadows the previous one at class-creation time and only the
    last survives; they read as distinct ``setUp``/``test_*`` cases originally.
    Fixes applied: ``np.uinta`` -> ``np.uint8`` (the former does not exist),
    and every statement that assigned to a throwaway local and then read an
    undefined name (``processor``, ``tokenizer``, ``self.tmpdirname``, ...)
    now binds the name it actually uses.
    """

    def lowerCamelCase_(self: Tuple):
        # setUp-style fixture: persist a BlipProcessor (tiny BERT tokenizer +
        # default Blip image processor) into a temp dir used by the helpers.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def lowerCamelCase_(self: List[Any], **UpperCamelCase__: str):
        # get_tokenizer: reload the tokenizer half of the saved processor.
        return AutoProcessor.from_pretrained(self.tmpdirname, **UpperCamelCase__).tokenizer

    def lowerCamelCase_(self: Union[str, Any], **UpperCamelCase__: Optional[Any]):
        # get_image_processor: reload the image-processor half.
        return AutoProcessor.from_pretrained(self.tmpdirname, **UpperCamelCase__).image_processor

    def lowerCamelCase_(self: List[str]):
        # tearDown-style cleanup.
        shutil.rmtree(self.tmpdirname)

    def lowerCamelCase_(self: Dict):
        # prepare_image_inputs: one random CHW uint8 image, converted to PIL (HWC).
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def lowerCamelCase_(self: Tuple):
        # save_pretrained + from_pretrained with extra kwargs must apply them.
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def lowerCamelCase_(self: Any):
        # image path of the processor must match the raw image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def lowerCamelCase_(self: Tuple):
        # text path of the processor must match the raw tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def lowerCamelCase_(self: int):
        # combined text+image call, and the no-input error case.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def lowerCamelCase_(self: List[Any]):
        # batch_decode must delegate to the tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def lowerCamelCase_(self: str):
        # output keys of a combined call.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 631
|
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Keep the original public binding, and also expose ``logger``, which the
# functions below actually reference (previously only ``_A`` was assigned,
# making every ``logger.error(...)`` call a NameError).
_A : int = logging.get_logger(__name__)
logger = _A
def SCREAMING_SNAKE_CASE_(UpperCamelCase) -> argparse.Namespace:
    """Load the original MobileViTv2 YAML config file and return it as a
    flat ``argparse.Namespace`` whose attribute names are dot-joined keys.

    Fixes: the nested helper declared three identically named parameters
    (a SyntaxError), and ``config``/``cfg``/``flat_cfg`` were assigned to
    throwaway locals and then read as undefined names.
    """
    print("""Loading config file...""")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings: {"a": {"b": 1}} -> {"a.b": 1}.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(UpperCamelCase, """r""") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(UpperCamelCase, str(exc)))
    return config
def SCREAMING_SNAKE_CASE_(task_name, orig_cfg_file):
    """Build a MobileViTv2 config for ``task_name``, reading hyper-parameters
    from the original (YAML-derived) config file.

    Fixes: the signature declared two identically named parameters (a
    SyntaxError) and every config field was assigned to a throwaway local, so
    the returned config was never populated.

    NOTE(review): ``load_orig_config_file`` is what this file's call sites
    use, but the loader above is bound as ``SCREAMING_SNAKE_CASE_`` at module
    level -- confirm the intended module-level names.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset: pick label count, input resolution and id2label file per task
    if task_name.startswith("""imagenet1k_"""):
        config.num_labels = 1000
        config.image_size = 384 if int(task_name.strip().split("""_""")[-1]) == 384 else 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_"""):
        config.num_labels = 21000
        config.image_size = 384 if int(task_name.strip().split("""_""")[-1]) == 384 else 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_"""):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_"""):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True
    # orig_config: remaining hyper-parameters come from the original YAML
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, """model.classification.name""", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, """model.classification.mitv2.width_multiplier""", 1.0)
    assert (
        getattr(orig_config, """model.classification.mitv2.attn_norm_layer""", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, """model.classification.activation.name""", """swish""")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, """model.segmentation.output_stride""", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, """model.segmentation.deeplabv3.aspp_rates""", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, """model.segmentation.deeplabv3.aspp_out_channels""", 512)
            config.aspp_dropout_prob = getattr(orig_config, """model.segmentation.deeplabv3.aspp_dropout""", 0.1)
    # id2label: download the label mapping and install it on the config
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def SCREAMING_SNAKE_CASE_(dct, old_key, new_key):
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place.

    Fixes: the signature declared three identically named parameters (a
    SyntaxError) and the popped value was never stored under the new key.
    """
    val = dct.pop(old_key)
    dct[new_key] = val
def SCREAMING_SNAKE_CASE_(state_dict, base_model=False):
    """Map original MobileViTv2 checkpoint key names onto HF model key names.

    Returns a list of ``(old_key, new_key)`` pairs; ``base_model=True`` drops
    the ``mobilevitv2.`` prefix.  Fixes: the signature declared two
    identically named parameters (a SyntaxError), and ``model_prefix``,
    ``k_new``, ``j_in`` and ``rename_keys`` were assigned to throwaway locals
    and then read as undefined names.  Note the membership tests below are
    deliberately against the ORIGINAL key ``k`` while replacements accumulate
    in ``k_new``.
    """
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(""".block.""", """.""")
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""", """.convolution.""")
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""", """.normalization.""")
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""", f'''{model_prefix}conv_stem.''')
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''', f'''{model_prefix}encoder.layer.{i-1}.layer.''')
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""", """.expand_1x1.""")
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""", """.reduce_1x1.""")
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''', f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''', f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''', f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')
        for i in [3, 4, 5]:
            # per-stage transformer depth determines which global_rep indices exist
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''', f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    # the entry after the last transformer layer is the final layernorm
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''', f'''{model_prefix}encoder.layer.{i-1}.layernorm.''')
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''', f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""", """layernorm_before.""")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""", """attention.""")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""", """layernorm_after.""")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""", """ffn.conv1.""")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""", """ffn.conv2.""")
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""", """classifier.""")
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""", """segmentation_head.""")
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""", """.""")
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""", """.""")
        rename_keys.append((k, k_new))
    return rename_keys
def SCREAMING_SNAKE_CASE_(state_dict):
    """Drop the auxiliary segmentation head (``seg_head.aux_head.*``) from the
    checkpoint in place; the HF model has no counterpart for it.

    Fixes: ``state_dict`` and ``keys_to_ignore`` were read but never bound
    (the parameter and accumulator were assigned to throwaway names).
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head."""):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def SCREAMING_SNAKE_CASE_():
    """Download and return the standard COCO test image (two cats on a couch).

    Fixes: the URL and the ``stream`` flag were read as undefined names in
    this zero-argument function.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTv2 checkpoint into a saved HF model plus
    image processor, sanity-checking the outputs on a test image.

    Fixes: the signature declared four identically named parameters (a
    SyntaxError) and every intermediate was assigned to a throwaway local.

    NOTE(review): the helpers called below (``get_mobilevitva_config``,
    ``remove_unused_keys``, ``create_rename_keys``, ``rename_key``,
    ``prepare_img``) are all bound as ``SCREAMING_SNAKE_CASE_`` at module
    level in this file -- confirm the intended module-level names.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    # load huggingface model
    if task_name.startswith("""ade20k_""") or task_name.startswith("""voc_"""):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith("""imagenet"""):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("""Predicted class:""", model.config.id2label[predicted_class_idx])
        if task_name.startswith("""imagenet1k_256""") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fix: bind the parser/args to the names actually read below (previously
    # both were assigned to ``_A`` while ``parser``/``args`` were undefined).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
            Classification (ImageNet-1k)
            - MobileViTV2 (256x256) : imagenet1k_256
            - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
            - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
              imagenet21k_to_1k_256
            - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
              ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
            - ADE20K Dataset : ade20k_deeplabv3
            - Pascal VOC 2012 Dataset: voc_deeplabv3
        '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    # Fix: the converter is bound as ``SCREAMING_SNAKE_CASE_`` (the last
    # binding of that name in this module); ``convert_mobilevitva_checkpoint``
    # does not exist here.
    SCREAMING_SNAKE_CASE_(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631
| 1
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A(snake_case__=3):
    '''
    Build an n-qubit Quantum Fourier Transform circuit and return the
    measurement counts from a 10000-shot QASM simulation.

    Fixes: the type guard compared the argument against itself
    (``isinstance(x, x)`` -- itself a TypeError for non-type inputs) and the
    body read ``number_of_qubits``/``counter``/``quantum_circuit`` etc. that
    were never bound; validation now runs before any qiskit object is built.

    Raises:
        TypeError:  if the argument is a string.
        ValueError: if the argument is <= 0, not an exact integer, or > 10.
    '''
    if isinstance(snake_case__, str):
        raise TypeError("""number of qubits must be a integer.""")
    if snake_case__ <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(snake_case__) != snake_case__:
        raise ValueError("""number of qubits must be exact integer.""")
    if snake_case__ > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""")
    qr = QuantumRegister(snake_case__, """qr""")
    cr = ClassicalRegister(snake_case__, """cr""")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = snake_case__
    for i in range(snake_case__):
        # Hadamard on the current highest qubit, then controlled phase
        # rotations from each lower qubit (pi/2, pi/4, ...).
        quantum_circuit.h(snake_case__ - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    # reverse qubit order with swaps to obtain the standard QFT ordering
    for k in range(snake_case__ // 2):
        quantum_circuit.swap(k, snake_case__ - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit, backend, shots=1_00_00)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    # Fix: the function is bound as ``A``; ``quantum_fourier_transform`` is
    # undefined in this module.
    print(
        F'Total count for quantum fourier transform state is: \\n {A(3)}'
    )
| 196
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowercase : Dict = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_)
class __UpperCamelCase(lowerCAmelCase_):
    """Image-classification pipeline: an image in, top-k ``{score, label}``
    dicts out, for either the PyTorch or TensorFlow framework.

    NOTE(review): the four stage methods all share the name
    ``__UpperCAmelCase``, so later definitions shadow earlier ones at
    class-creation time; they read as the standard pipeline hooks
    (``_sanitize_parameters``/``preprocess``/``_forward``/``postprocess``)
    originally.  Fixes applied: duplicate ``*__a, **__a`` parameter names
    (a SyntaxError) and several locals that were assigned to throwaway
    names and then read as undefined names.
    """

    def __init__(self, *args, **kwargs):
        '''Validate backends and restrict to image-classification heads.'''
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def __UpperCAmelCase(self, top_k=None):
        '''Route the optional ``top_k`` kwarg into the postprocess params.'''
        # Fix: the postprocess dict was never populated and the returned name
        # was undefined (NameError on every call).
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, inputs, **kwargs):
        '''Classify one image (or a batch) via the base pipeline machinery.'''
        return super().__call__(inputs, **kwargs)

    def __UpperCAmelCase(self, __a):
        '''Preprocess: load the image and run the image processor.'''
        image = load_image(__a)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def __UpperCAmelCase(self, __a):
        '''Forward: run the model on the processed inputs.'''
        model_outputs = self.model(**__a)
        return model_outputs

    def __UpperCAmelCase(self, model_outputs, top_k=5):
        '''Postprocess: softmax the logits and return the top-k labels.'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 476
| 0
|
import requests
__magic_name__ = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
# alias matching the name the fetch function historically used
_NEWS_API = __magic_name__


def __UpperCamelCase(bbc_news_api_key):
    """Print the top BBC News article titles for the given NewsAPI key.

    Fixes: the body read the undefined names ``_NEWS_API`` and
    ``bbc_news_api_key`` (the parameter was ``A`` and the URL constant is
    bound as ``__magic_name__``); the parameter is renamed so the keyword
    call in the ``__main__`` guard works.
    """
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    # Fix: the function is bound as ``__UpperCamelCase``; ``fetch_bbc_news``
    # is undefined in this module.
    __UpperCamelCase(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 469
|
def __UpperCamelCase ( A = 600851475143 ):
try:
UpperCamelCase__ = int(A )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
UpperCamelCase__ = 1
UpperCamelCase__ = 2
while i * i <= n:
while n % i == 0:
UpperCamelCase__ = i
n //= i
i += 1
if n > 1:
UpperCamelCase__ = n
return int(A )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 469
| 1
|
from math import ceil
def UpperCAmelCase_(device_map, num_blocks):
    """Validate a model-parallel device map: every attention block in
    ``range(num_blocks)`` must appear exactly once across all devices.

    Fixes: the signature declared two identically named parameters (a
    SyntaxError) and one placeholder name stood in for several distinct
    values (the block count, the loop variable, and the three error lists).

    Raises:
        ValueError: on duplicated, missing, or out-of-range blocks.
    """
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks))
def UpperCAmelCase_(n_layers, devices):
    """Evenly partition ``n_layers`` layer indices over ``devices`` and
    return a ``{device: [layer, ...]}`` map (last device may get fewer).

    Fixes: the signature declared two identically named parameters (a
    SyntaxError) and the locals (``layers``, ``n_blocks``, ``layers_list``)
    were read without ever being bound.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 423
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for google/pegasus-large (slow and fast classes).

    NOTE(review): every test method is named ``lowerCAmelCase__``, so later
    definitions shadow earlier ones at class-creation time and only the last
    remains callable; they read as distinct ``setUp``/``test_*`` cases
    originally.  Many statements also assign results to throwaway names
    (``UpperCamelCase__ : T = ...``) and then read a different, undefined
    name (e.g. ``tokenizer`` in the fixture, ``vocab_keys``/``UpperCamelCase``
    in the vocab tests) -- those raise NameError as written.  Flagged once
    here rather than on every line; code is left byte-identical.
    """

    # TokenizerTesterMixin-style configuration: slow class, fast class, and
    # (presumably) the test_rust_tokenizer / test_sentencepiece flags.
    UpperCamelCase_ = PegasusTokenizer
    UpperCamelCase_ = PegasusTokenizerFast
    UpperCamelCase_ = True
    UpperCamelCase_ = True

    def lowerCAmelCase__ ( self) -> Dict:
        # setUp-style fixture: build a tokenizer from the SentencePiece sample
        # (module-level fixture path is bound as ``UpperCAmelCase__``) and
        # persist it under self.tmpdirname.
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCamelCase__ : Any = PegasusTokenizer(UpperCamelCase)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def lowerCAmelCase__ ( self) -> int:
        # the reference pretrained tokenizer downloaded from the Hub
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def lowerCAmelCase__ ( self , **UpperCamelCase) -> PegasusTokenizer:
        # tokenizer factory that reads back the saved fixture
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase)

    def lowerCAmelCase__ ( self , UpperCamelCase) -> Optional[int]:
        # (input, expected) pair used by shared mixin machinery
        return ("This is a test", "This is a test")

    def lowerCAmelCase__ ( self) -> Dict:
        # "</s>" should round-trip to id 1 and back
        UpperCamelCase__ : List[str] = '</s>'
        UpperCamelCase__ : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase) , UpperCamelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase) , UpperCamelCase)

    def lowerCAmelCase__ ( self) -> Optional[int]:
        # spot-check vocab layout (pad / eos first, 'v' last) and total size
        UpperCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<pad>')
        self.assertEqual(vocab_keys[1] , '</s>')
        self.assertEqual(vocab_keys[-1] , 'v')
        self.assertEqual(len(UpperCamelCase) , 11_03)

    def lowerCAmelCase__ ( self) -> List[Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 11_03)

    def lowerCAmelCase__ ( self) -> List[Any]:
        # slow and fast tokenizers must agree on unknown-token handling
        UpperCamelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        UpperCamelCase__ : Any = self.tokenizer_class.from_pretrained(self.tmpdirname)
        UpperCamelCase__ : Union[str, Any] = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        UpperCamelCase__ : str = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase , add_special_tokens=UpperCamelCase).input_ids[0]
        UpperCamelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase , add_special_tokens=UpperCamelCase).input_ids[0]
        self.assertListEqual(UpperCamelCase , UpperCamelCase)

    def lowerCAmelCase__ ( self) -> List[Any]:
        UpperCamelCase__ : int = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        UpperCamelCase__ : Optional[Any] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        UpperCamelCase__ : List[str] = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        UpperCamelCase__ : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCamelCase).input_ids[0]
        self.assertListEqual(UpperCamelCase , UpperCamelCase)

    def lowerCAmelCase__ ( self) -> Optional[int]:
        # structural invariants of the pretrained large tokenizer
        UpperCamelCase__ : int = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_61_03
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_03
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 10_24
        UpperCamelCase__ : List[Any] = 'To ensure a smooth flow of bank resolutions.'
        UpperCamelCase__ : Tuple = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        UpperCamelCase__ : int = tokenizer([raw_input_str] , return_tensors=UpperCamelCase).input_ids[0]
        self.assertListEqual(UpperCamelCase , UpperCamelCase)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def lowerCAmelCase__ ( self) -> Optional[Any]:
        # padding/truncation shapes for a (long, short) source/target batch
        UpperCamelCase__ : Optional[int] = ['This is going to be way too long.' * 1_50, 'short example']
        UpperCamelCase__ : str = ['not super long but more than 5 tokens', 'tiny']
        UpperCamelCase__ : Optional[int] = self._large_tokenizer(UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='pt')
        UpperCamelCase__ : List[str] = self._large_tokenizer(
            text_target=UpperCamelCase , max_length=5 , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='pt')
        assert batch.input_ids.shape == (2, 10_24)
        assert batch.attention_mask.shape == (2, 10_24)
        assert targets["input_ids"].shape == (2, 5)
        assert len(UpperCamelCase) == 2 # input_ids, attention_mask.

    @slow
    def lowerCAmelCase__ ( self) -> Dict:
        # integration check against a pinned revision of bigbird-pegasus
        # fmt: off
        UpperCamelCase__ : Union[str, Any] = {'input_ids': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for the BigBird-Pegasus variant (offset=0, [MASK]).

    NOTE(review): as in the class above, all test methods share the name
    ``lowerCAmelCase__`` (later definitions shadow earlier ones), and several
    statements assign to throwaway ``UpperCamelCase__`` locals and then read
    undefined names (``tokenizer``, ``rust_tokenizer``, ``batch``,
    ``targets``, ...) -- NameErrors as written.  Code left byte-identical.
    """

    UpperCamelCase_ = PegasusTokenizer
    UpperCamelCase_ = PegasusTokenizerFast
    UpperCamelCase_ = True
    UpperCamelCase_ = True

    def lowerCAmelCase__ ( self) -> Dict:
        # setUp-style fixture: BigBird-flavoured tokenizer (no offset, plain
        # [MASK]) saved under self.tmpdirname.
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCamelCase__ : str = PegasusTokenizer(UpperCamelCase , offset=0 , mask_token_sent=UpperCamelCase , mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def lowerCAmelCase__ ( self) -> List[Any]:
        # the reference pretrained tokenizer downloaded from the Hub
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def lowerCAmelCase__ ( self , **UpperCamelCase) -> PegasusTokenizer:
        # tokenizer factory that reads back the saved fixture
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase)

    def lowerCAmelCase__ ( self , UpperCamelCase) -> int:
        # (input, expected) pair used by shared mixin machinery
        return ("This is a test", "This is a test")

    def lowerCAmelCase__ ( self) -> Dict:
        # slow and fast tokenizers must agree on unknown-token handling
        UpperCamelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        UpperCamelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname)
        UpperCamelCase__ : Tuple = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        UpperCamelCase__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase , add_special_tokens=UpperCamelCase).input_ids[0]
        UpperCamelCase__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase , add_special_tokens=UpperCamelCase).input_ids[0]
        self.assertListEqual(UpperCamelCase , UpperCamelCase)

    @require_torch
    def lowerCAmelCase__ ( self) -> Optional[Any]:
        # padding/truncation shapes for a (long, short) source/target batch
        UpperCamelCase__ : List[str] = ['This is going to be way too long.' * 10_00, 'short example']
        UpperCamelCase__ : List[Any] = ['not super long but more than 5 tokens', 'tiny']
        UpperCamelCase__ : Optional[Any] = self._large_tokenizer(UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='pt')
        UpperCamelCase__ : Optional[Any] = self._large_tokenizer(
            text_target=UpperCamelCase , max_length=5 , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='pt')
        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(UpperCamelCase) == 2 # input_ids, attention_mask.

    def lowerCAmelCase__ ( self) -> Union[str, Any]:
        # token ids of a reference sentence must match the TF implementation
        UpperCamelCase__ : List[Any] = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        UpperCamelCase__ : str = self._large_tokenizer(UpperCamelCase).input_ids
        self.assertListEqual(
            UpperCamelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 410
| 0
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _snake_case(pl.LightningModule):
    """Minimal LightningModule wrapper: a Longformer encoder plus a linear
    QA output head, used only to load a Lightning checkpoint for conversion.

    Fixes: ``__init__`` previously bound nothing to ``self`` (every value
    went to a throwaway local), so the ``.model`` / ``.qa_outputs`` accesses
    performed by the converter below failed with AttributeError.
    """

    def __init__(self, UpperCamelCase) -> None:
        super().__init__()
        self.model = UpperCamelCase
        # two labels: start and end positions for extractive QA
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def lowerCAmelCase_(self) -> None:
        # placeholder forward: weights are only transferred here, never trained
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    """Convert a PyTorch-Lightning Longformer QA checkpoint into a `LongformerForQuestionAnswering` save dir.

    Fixes from the garbled source: all three parameters shared the same name (a
    SyntaxError in Python) and the final print referenced ``pytorch_dump_folder_path``
    which did not exist; the ``if __name__`` block below already calls this function
    under this name.

    Args:
        longformer_model: model identifier of the Longformer backbone.
        longformer_question_answering_ckpt_path: path to the Lightning checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # CLI entry point. Fixes from the garbled source: the parser and parsed args were
    # assigned to throwaway names while `parser` / `args` were the names actually read.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
|
# Conditional exports for the ControlNet pipelines: real implementations when the
# required backends are installed, dummy placeholder objects otherwise.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


# The PyTorch ControlNet pipelines need both `torch` and `transformers`; fall back to
# the auto-generated dummy objects (which raise a helpful error on use) when missing.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline additionally requires JAX/Flax on top of `transformers`.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 1
|
"""Public download API of the library: config, managers, and the streaming manager."""

# Fix from the garbled source: a bare list of the public names above the imports is
# the module's export declaration and must be bound to `__all__` to take effect.
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 13
|
'''simple docstring'''
def UpperCAmelCase__(UpperCAmelCase_: int = 10_00) -> int:
    """Return the sum of all natural numbers below ``UpperCAmelCase_`` divisible by 3 or 5.

    Fixes from the garbled source: the loop bound referenced an undefined name ``n``
    instead of the parameter, and the ``elif a % 15 == 0`` branch was dead code
    (any multiple of 15 is already caught by ``a % 3 == 0``) and has been removed.

    >>> UpperCAmelCase__(10)
    23
    """
    total = 0
    # Multiples start at 3, matching the original loop's initial value.
    for candidate in range(3, UpperCAmelCase_):
        if candidate % 3 == 0 or candidate % 5 == 0:
            total += candidate
    return total


if __name__ == "__main__":
    # The obfuscation renamed the function but the guard still printed `solution()`;
    # call the function under its actual (current) name.
    print(f"{UpperCAmelCase__() = }")
| 13
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece fixture used by the unit tests below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# Language-code ids of the MBart-50 vocabulary. The obfuscation collapsed these
# three distinct constants onto a single name `a_` (each assignment clobbering the
# previous), while the integration test class below reads EN_CODE / RO_CODE.
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for the slow and fast MBart-50 tokenizers.

    Fixes from the garbled source: this class, the integration class below, and every
    test method shared a single obfuscated name (so later definitions shadowed earlier
    ones and unittest collected almost nothing), the mixin base was an undefined name,
    and argument placeholders (`lowercase_`) were undefined; names are restored here.
    """

    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(
            get_tests_dir("fixtures/test_sentencepiece.model"), src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True
        )
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Token <-> id round trip for the BOS token."""
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_54)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_54)

    def test_full_tokenizer(self):
        """End-to-end check of tokenize / ids / back-conversion on reference sentences."""
        tokenizer = MBartaaTokenizer(
            get_tests_dir("fixtures/test_sentencepiece.model"), src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True
        )
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # Unknown pieces ("9", "é") come back as "<unk>".
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )

    def test_save_pretrained(self):
        """Saving/loading must produce the same files and special tokens for slow and fast tokenizers."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    """Integration tests against the released one-to-many MBart-50 checkpoint.

    Fixes from the garbled source: the class and all methods shared obfuscated names
    (later defs shadowed earlier ones), the class attributes were all bound to one
    name, and argument placeholders (`lowercase_`) were undefined.
    """

    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]

    @classmethod
    def setUpClass(cls):
        # Download once; every test below shares this tokenizer instance.
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_00_01)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_00_04)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_00_20)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 25_00_38)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_00_53, 25_00_01])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[25_00_04, 62, 30_34, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_00_01,
            },
        )
| 710
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# Fixes from the garbled source: these constants were all bound to `a_`, while
# `DIFFUSERS_PATH` and `spec` are the names read just below and in the functions.
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return line.startswith(__SCREAMING_SNAKE_CASE ) or len(__SCREAMING_SNAKE_CASE ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", __SCREAMING_SNAKE_CASE ) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Return the source code of `object_name` from the diffusers repo checkout.

    `object_name` is a dotted path such as "models.unet_2d.UNet2DModel". Fixes from
    the garbled source: the def name was obfuscated away from the name its caller
    uses, and the parameter placeholder was substituted where `DIFFUSERS_PATH` and
    `module` belong.

    Raises:
        ValueError: if no module file or no matching class/function is found.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        # NOTE(review): the garbled source grew the indent by a single space per level;
        # 4 spaces matches the repo's formatting — confirm against the upstream script.
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
a_ = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
a_ = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
a_ = re.compile(R"<FILL\s+[^>]*>")
def get_indent(code: str) -> str:
    """Return the leading indentation of the first non-empty line of `code` ("" if none).

    Fixes from the garbled source: the def name was obfuscated away from the
    `get_indent` its callers use, and the body read an undefined name `code`
    while the parameter carried a placeholder name.
    """
    lines = code.split("\n")
    idx = 0
    # Skip fully empty leading lines (a line of only spaces still counts as content).
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    """Format `code` with black, wrapping indented snippets in a dummy class first.

    Fixes from the garbled source: the def name was obfuscated away from the
    `blackify` its caller uses, `black.TargetVersion.PYaa` is not a real member
    (PY37 matches the upstream script), and the `preview` flag was an undefined name.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        # black only formats top-level code, so fake an enclosing class.
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=1_1_9, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    """Check that all `# Copied from` blocks in `filename` match their original code.

    Returns a list of `[object_name, start_line]` inconsistencies; when `overwrite`
    is True the file is rewritten in place with the theoretical code. Fixes from
    the garbled source: the def name, both parameters, and many local references
    were collapsed onto placeholder names, and the rewrite message lost its
    `{filename}` interpolation.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja, objb, option = pattern.groups()
                theoretical_code = re.sub(obja, objb, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower(), objb.lower(), theoretical_code)
                    theoretical_code = re.sub(obja.upper(), objb.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """Check every `# Copied from` block under the diffusers source tree.

    Fixes from the garbled source: the def name was obfuscated away from the
    `check_copies` the `__main__` guard calls, `recursive` was an undefined name
    (True is required for the `**` glob to recurse), and the diff message lost its
    `{filename}` interpolation.

    Raises:
        Exception: listing all inconsistencies, unless `overwrite` is True.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    # CLI entry point. Fixes from the garbled source: the parser and parsed args
    # were assigned to `a_` while `parser` / `args` were the names actually read.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 92
| 0
|
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter of `plain` to its 1-based alphabet position ('a' -> 1).

    Fixes from the garbled source: the comprehension called `ord` on the whole
    argument instead of each character and iterated an undefined name, and the def
    was renamed away from the `encode` that `main` calls.
    """
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    """Inverse of `encode`: map 1-based alphabet positions back to lowercase letters.

    Fixes from the garbled source: the generator iterated an undefined name, and
    the def was renamed away from the `decode` that `main` calls.
    """
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    """Prompt for a plaintext message, then show it encoded and decoded again.

    Fixes from the garbled source: the def was renamed away from the `main` the
    guard calls, and the encoded result was bound to a throwaway local.
    """
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
| 60
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)  # module logger (currently unused)


class DonutFeatureExtractor(DonutImageProcessor):
    """Deprecated alias of `DonutImageProcessor`, kept only for backward compatibility.

    Fixes from the garbled source: the base class and the warning category were
    undefined placeholder names; the class name itself is grounded by the
    deprecation message below.
    """

    def __init__(self, *args, **kwargs):
        # Emit the standard deprecation warning, then defer entirely to the new class.
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 411
| 0
|
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the decryption of `message` under all 26 keys.

    Non-uppercase characters pass through unchanged. Fixes from the garbled source:
    the alphabet lookup used an undefined name instead of the current symbol, and
    the def was renamed away from the `decrypt` that `main` calls.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    # wrap around the alphabet
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')
def main() -> None:
    """Prompt for an encrypted message and print all 26 candidate decryptions.

    Fixes from the garbled source: `decrypt` was called with an undefined name
    instead of the upper-cased message, and the def was renamed away from the
    `main` that the guard calls.
    """
    message = input('''Encrypted message: ''')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
    # Run the module's doctests before dropping into the interactive prompt.
    import doctest

    doctest.testmod()
    main()
| 709
|
'''simple docstring'''
def infix_2_postfix(infix):
    """Convert an infix expression (string of single-char tokens) to postfix.

    Prints a step-by-step trace table as a side effect and returns the postfix
    string. Fixes from the garbled source: the body iterated/appended undefined
    placeholder names, and the operator-priority comparison raised KeyError when
    "(" was on top of the stack (it has no priority) — the stack pop now stops at
    an open parenthesis, which is the standard shunting-yard behaviour.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ")
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ")  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ")  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    """Convert an infix expression to prefix: reverse it, swap parentheses, take
    the postfix of the result, and reverse again.

    Fixes from the garbled source: the reversed character list was read under an
    undefined name and the swapped parentheses were assigned to a throwaway local
    instead of back into the list.
    """
    reversed_chars = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_chars)):
        if reversed_chars[i] == "(":
            reversed_chars[i] = ")"  # change "(" to ")"
        elif reversed_chars[i] == ")":
            reversed_chars[i] = "("  # change ")" to "("
    # call infix_2_postfix on the reversed expression, then reverse its postfix
    return (infix_2_postfix("".join(reversed_chars)))[::-1]
if __name__ == "__main__":
    # Fixes from the garbled source: the input was bound to throwaway names while
    # the follow-up lines read an undefined `Infix`.
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 350
| 0
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowercase(unittest.TestCase):
    """Integration tests for ``JukeboxTokenizer``.

    Tokenizes one fixed (artist, genres, lyrics) example with the 1b- and
    5b-lyrics checkpoints and compares the resulting ``input_ids`` against
    golden token tensors.
    """

    # The test methods read `self.metas`, and the tokenizer class is
    # conventionally exposed as `tokenizer_class`; the obfuscated original
    # bound both to throwaway names, leaving `self.metas` undefined.
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": '''I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ''',
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self):
        """1b-lyrics vocab: tokenized metas must match the golden tensors."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]]),
            torch.tensor([[0, 0, 0, 1_069, 11]]),
            torch.tensor([[0, 0, 0, 1_069, 11]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        """5b-lyrics vocab: tokenized metas must match the golden tensors."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]]),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 696
|
"""Lazy import structure for the SEW model (configuration + PyTorch model)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> list of public names exported from it. The original
# bound this dict (and the torch-only list below) to a throwaway name, so the
# `_import_structure` passed to `_LazyModule` was undefined.
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure['modeling_sew'] = [
        'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SEWForCTC',
        'SEWForSequenceClassification',
        'SEWModel',
        'SEWPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; assigning the result to a local
    # name (as the original did) has no effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 466
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase : Any = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def snake_case__(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict for a Blenderbot forward pass.

    Any mask not supplied is derived: attention masks are 1 where the token
    differs from ``config.pad_token_id``, head masks default to all-ones.

    The original signature repeated one parameter name (a SyntaxError), and
    the returned dict mapped ``"decoder_attention_mask"`` to the *encoder*
    attention mask, silently discarding the decoder mask computed above.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


# Backward-compatible canonical name used by callers elsewhere in this file.
prepare_blenderbot_inputs_dict = snake_case__
class __lowercase:
    """Builds tiny Blenderbot configs/inputs and provides the shared
    incremental-decoding (use-cache) checks used by the model test class.

    The obfuscated original repeated one parameter name in ``__init__`` (a
    SyntaxError), named every method ``snake_case`` (so later defs shadowed
    earlier ones), and collapsed tuple unpackings so names like
    ``decoder_input_ids`` were read but never bound.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Return (config, inputs_dict) for a small random model."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        # Force an EOS (id 2) as the last token of every row.
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,  # NOTE(review): value lost in obfuscation; the cache is driven explicitly below — confirm
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Cached step-by-step decoding must match a full decode pass."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f'Max diff is {diff}')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but drives decode() with an explicit attention mask
        zero-padded out to the full cache length."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f'Max diff is {diff}')


# Backward-compatible canonical name: the test class below instantiates
# `FlaxBlenderbotModelTester(self)`.
FlaxBlenderbotModelTester = __lowercase
@require_flax
class __lowercase(unittest.TestCase):
    """Standalone LM-head and shift_tokens_right tests that don't need the
    full tester/mixin machinery.

    The obfuscated original bound ``vocab_size``/locals to throwaway names
    (so ``self.vocab_size``, ``outputs``, ``summary`` were undefined) and
    used the non-existent dtypes ``np.intaa``/``np.floataa``.
    """

    vocab_size = 99  # read as self.vocab_size below

    def _get_config_and_data(self):
        """Return (config, input_ids, batch_size) for a tiny model."""
        # 13 sequences of length 7; every row ends with EOS (2), one row padded with 1.
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,  # NOTE(review): original `np.intaa` is not a dtype; int64 assumed — confirm
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_lm_forward_with_decoder_inputs(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # Shifting consumes exactly one pad token (replaced by the start token).
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class __lowercase(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Model-level tests for Flax Blenderbot (cache, JIT encode/decode,
    pretrained loading, slow generation).

    The obfuscated original listed the same placeholder base class twice
    (a 'duplicate base class' TypeError); the intended mixins are imported
    at the top of this file. Class attributes are restored to the names the
    mixins and methods actually read (`all_model_classes`, ...), and test
    methods are renamed so unittest discovers them.
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        # NOTE(review): relies on `FlaxBlenderbotModelTester` being bound at
        # module level (the tester class defined above) — confirm.
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        """encode() must give identically-shaped outputs with and without JIT."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        """decode() must give identically-shaped outputs with and without JIT."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.')
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        TOK_DECODE_KWARGS = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
        src_text = ['Sam']
        model_inputs = tokenizer(src_text, return_tensors='jax')
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KWARGS)
        assert generated_txt[0].strip() == tgt_text
| 423
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class __lowercase(unittest.TestCase):
    """Holds the ConditionalDetr image-processor kwargs used by the test
    suite and computes the (height, width) the processor is expected to
    produce for given inputs.

    The obfuscated original repeated a parameter name (SyntaxError),
    collapsed the `w, h` / `h, w` unpackings (leaving them undefined), and
    named both methods `snake_case` so the second shadowed the first.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For a batch, each image's size is computed individually and the
        per-axis maxima are returned (processor pads to the batch max).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # NOTE(review): assumes channels-first arrays, i.e. shape (C, H, W) — confirm
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


# Backward-compatible canonical name: the test class below instantiates
# `ConditionalDetrImageProcessingTester(self)`.
ConditionalDetrImageProcessingTester = __lowercase
@require_torch
@require_vision
class __lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Dict = ConditionalDetrImageProcessor if is_vision_available() else None
def snake_case ( self ) -> Optional[int]:
A : str = ConditionalDetrImageProcessingTester(self )
@property
def snake_case ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ) -> str:
A : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
def snake_case ( self ) -> List[Any]:
A : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
A : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def snake_case ( self ) -> Dict:
pass
def snake_case ( self ) -> Tuple:
# Initialize image_processing
A : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
A : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
A : Any = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ) -> Optional[Any]:
# Initialize image_processing
A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
A : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Any = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
A , A : Union[str, Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ) -> List[str]:
# Initialize image_processing
A : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
A : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Dict = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : int = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
A , A : Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self ) -> Optional[Any]:
# prepare image and target
A : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
A : int = json.loads(f.read() )
A : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
A : Tuple = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
A : Union[str, Any] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
A : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
A : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
A : Optional[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
A : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
A : Tuple = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
A : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
A : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
A : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify orig_size
A : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
A : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
@slow
def snake_case(self) -> None:
    """Integration test: encode an image plus COCO panoptic annotations and
    verify every field of the resulting `labels` dict against reference values."""
    # prepare image, target and masks_path
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
        target = json.loads(f.read())
    target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
    masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
    # encode them
    image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
    encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
    # verify pixel values (shortest side resized to 800, so 480x640 -> 800x1066)
    expected_shape = torch.Size([1, 3, 800, 1066])
    self.assertEqual(encoding["pixel_values"].shape, expected_shape)
    expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
    self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
    # verify area
    expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
    self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
    # verify boxes (normalized cx, cy, w, h)
    expected_boxes_shape = torch.Size([6, 4])
    self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
    expected_first_box = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
    self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_first_box, atol=1e-3))
    # verify image_id
    expected_image_id = torch.tensor([39769])
    self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
    # verify is_crowd
    expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
    self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
    # verify class_labels
    expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
    self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
    # verify masks (sum of all binary mask pixels is a stable fingerprint)
    expected_masks_sum = 822873
    self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
    # verify orig_size
    expected_orig_size = torch.tensor([480, 640])
    self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
    # verify size
    expected_size = torch.tensor([800, 1066])
    self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 423
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _a(TaskTemplate):
    """Task template for language-modeling datasets.

    Declares the canonical input schema (a single string column "text") and
    maps the dataset's text column onto it.
    """

    # Canonical task name; metadata forces it into asdict() output even when default.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    # Schema of the model inputs; language modeling has no separate labels.
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    # Name of the dataset column holding the raw text.
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Return the mapping from the dataset's text column to the canonical "text" column."""
        return {self.text_column: "text"}
| 191
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build a uint8 alpha mask of the given (width, height) *size*.

    The mask is fully opaque (255) in the interior and ramps linearly down to 0
    over *overlap_pixels* at each edge, so overlapping tiles blend smoothly.
    Edges listed in *remove_borders* ("l", "r", "t", "b") are kept opaque
    (their ramp is cut off) — used for tiles that touch the image boundary.
    """
    # Avoid a mutable default argument.
    if remove_borders is None:
        remove_borders = []
    # Start from the interior size; edges without a ramp get their overlap back.
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    # linear_ramp pads each side with values ramping from the edge (255) to 0.
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    # Slice away the ramp on borders that must stay opaque.
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    """Clamp *n* into the inclusive range [smallest, largest]."""
    return max(smallest, min(n, largest))
def clamp_rect(rect, min_coords, max_coords):
    """Clamp a (x0, y0, x1, y1) rectangle into the box spanned by
    *min_coords* = [x_min, y_min] and *max_coords* = [x_max, y_max].

    Returns a new 4-tuple; the input is not modified.
    """
    x_lo, y_lo = min_coords[0], min_coords[1]
    x_hi, y_hi = max_coords[0], max_coords[1]
    return (
        max(x_lo, min(rect[0], x_hi)),
        max(y_lo, min(rect[1], y_hi)),
        max(x_lo, min(rect[2], x_hi)),
        max(y_lo, min(rect[3], y_hi)),
    )
def add_overlap_rect(rect, overlap, image_size):
    """Grow a (x0, y0, x1, y1) rectangle by *overlap* pixels on every side,
    clamped to stay inside the image ((0, 0) .. image_size).

    Returns a new 4-tuple; the input is not modified.
    """
    x0 = max(0, rect[0] - overlap)
    y0 = max(0, rect[1] - overlap)
    x1 = min(image_size[0], rect[2] + overlap)
    y1 = min(image_size[1], rect[3] + overlap)
    return (x0, y0, x1, y1)
def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a vertical strip of context from *original_image* to the left of *tile*.

    A strip of width *original_slice*, taken at horizontal offset *slice_x* from
    the original image (resized to the tile's size), is pasted at x=0 and the
    tile itself is shifted right by *original_slice*. Returns the widened image.
    """
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    """Undo squeeze_tile after 4x upscaling: crop off the context strip
    (now 4x wider) from the left of *tile* and return the cropped image."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = n % d
return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Tile-by-tile wrapper around StableDiffusionUpscalePipeline.

    Splits the input image into tiles of *tile_size*, upscales each tile 4x with
    the parent pipeline, and blends the results into one large image using
    linear-ramp transparency masks over the tile overlaps.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ) -> None:
        # Pure pass-through to the parent upscale pipeline.
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale the tile at grid position (x, y) and paste it into *final_image*.

        *original_image_slice* is the width of the context strip borrowed from the
        original image for left-edge continuity; *tile_border* is the overlap
        added around each tile before upscaling. Remaining kwargs are forwarded
        to the parent pipeline's __call__.
        """
        # Fixed seed so adjacent tiles are generated with the same noise.
        torch.manual_seed(0)
        # Tile rectangle in original-image coordinates; the min() keeps the last
        # row/column of tiles inside the image.
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position (in tile coordinates) of the context strip, centered
        # on the tile's midpoint in the original image.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        # Delegate the actual 4x upscale of this tile to the parent pipeline.
        upscaled_tile = super().__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        # Tiles that touch the image boundary keep that edge fully opaque.
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Upscale *image* 4x, tile by tile, guided by *prompt*.

        *callback*, if given, is invoked after each tile with a dict holding
        "progress" (fraction done) and "image" (the partially assembled result).
        Returns the final 4x-upscaled PIL image.
        """
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    """Run a small demo: 4x-upscale the diffusers logo with the tiled pipeline."""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    # fp16 weights to fit on a single consumer GPU.
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        # Report progress and save the partially assembled image after each tile.
        print(F"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 429
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _lowercase(PipelineTool):
    """Agent tool that reads English text out loud with SpeechT5.

    The processor tokenizes the text, the model generates a speech waveform
    conditioned on a speaker x-vector, and the HiFi-GAN vocoder post-processes it.
    """

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        """Default the vocoder checkpoint before running the generic tool setup."""
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """Tokenize *text* and resolve speaker embeddings (defaults to a CMU Arctic x-vector)."""
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            # Entry 7305 is a fixed reference speaker from the validation split.
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        """Generate the raw speech waveform from the encoded inputs."""
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        """Vocode the model output and return it as a detached CPU tensor."""
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 448
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint to a PyTorch model and save it.

    Builds the architecture from *big_bird_config_file* (a QA head when
    *is_trivia_qa* is set, a pretraining head otherwise), loads the TF weights
    from *tf_checkpoint_path*, and writes the model to *pytorch_dump_path*.
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point for the BigBird TF -> PyTorch conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 448
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.