| code (string, lengths 82-54.1k) | code_codestyle (int64, 0-699) | style_context (string, lengths 111-35.6k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
import py3nvml.py3nvml as nvml
__magic_name__ = logging.get_logger(__name__)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
def run_func(UpperCamelCase__ ):
@wraps(UpperCamelCase__ )
def run_in_eager_mode(*UpperCamelCase__ , **UpperCamelCase__ ):
return func(*UpperCamelCase__ , **UpperCamelCase__ )
@wraps(UpperCamelCase__ )
@tf.function(experimental_compile=UpperCamelCase__ )
def run_in_graph_mode(*UpperCamelCase__ , **UpperCamelCase__ ):
return func(*UpperCamelCase__ , **UpperCamelCase__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = random.Random()
_UpperCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(UpperCamelCase__ , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : TensorFlowBenchmarkArguments
lowercase_ : PretrainedConfig
lowercase_ : str = "TensorFlow"
@property
def _a ( self ) -> str:
return tf.__version__
def _a ( self , a_ , a_ , a_ ) -> float:
# initialize GPU on separate process
_UpperCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
_UpperCAmelCase = self._prepare_inference_func(a_ , a_ , a_ )
return self._measure_speed(_inference )
def _a ( self , a_ , a_ , a_ ) -> float:
_UpperCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
_UpperCAmelCase = self._prepare_train_func(a_ , a_ , a_ )
return self._measure_speed(_train )
def _a ( self , a_ , a_ , a_ ) -> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a_ )
_UpperCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
_UpperCAmelCase = self._prepare_inference_func(a_ , a_ , a_ )
return self._measure_memory(_inference )
def _a ( self , a_ , a_ , a_ ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a_ )
_UpperCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
_UpperCAmelCase = self._prepare_train_func(a_ , a_ , a_ )
return self._measure_memory(_train )
def _a ( self , a_ , a_ , a_ ) -> Callable[[], None]:
_UpperCAmelCase = self.config_dict[model_name]
if self.args.fp16:
raise NotImplementedError("Mixed precision is currently not supported." )
_UpperCAmelCase = (
hasattr(a_ , "architectures" )
and isinstance(config.architectures , a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_UpperCAmelCase = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
_UpperCAmelCase = __import__("transformers" , fromlist=[model_class] )
_UpperCAmelCase = getattr(a_ , a_ )
_UpperCAmelCase = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
_UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_UpperCAmelCase = config.vocab_size if hasattr(a_ , "vocab_size" ) else config.encoder.vocab_size
_UpperCAmelCase = random_input_ids(a_ , a_ , a_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(a_ , decoder_input_ids=a_ , training=a_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(a_ , training=a_ )
_UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _a ( self , a_ , a_ , a_ ) -> Callable[[], None]:
_UpperCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fp16:
raise NotImplementedError("Mixed precision is currently not supported." )
_UpperCAmelCase = (
hasattr(a_ , "architectures" )
and isinstance(config.architectures , a_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_UpperCAmelCase = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
_UpperCAmelCase = __import__("transformers" , fromlist=[model_class] )
_UpperCAmelCase = getattr(a_ , a_ )
_UpperCAmelCase = model_cls(a_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
_UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_ )
# encoder-decoder has vocab size saved differently
_UpperCAmelCase = config.vocab_size if hasattr(a_ , "vocab_size" ) else config.encoder.vocab_size
_UpperCAmelCase = random_input_ids(a_ , a_ , a_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_UpperCAmelCase = model(a_ , decoder_input_ids=a_ , labels=a_ , training=a_ )[0]
_UpperCAmelCase = tf.gradients(a_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_UpperCAmelCase = model(a_ , labels=a_ , training=a_ )[0]
_UpperCAmelCase = tf.gradients(a_ , model.trainable_variables )
return gradients
_UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _a ( self , a_ ) -> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run model 5 additional times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(a_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_UpperCAmelCase = timeit.repeat(
a_ , repeat=self.args.repeat , number=10 , )
return min(a_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def _a ( self , a_ ) -> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
_UpperCAmelCase = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_py3nvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
_UpperCAmelCase = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
_UpperCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_UpperCAmelCase = nvml.nvmlDeviceGetMemoryInfo(a_ )
_UpperCAmelCase = meminfo.used
_UpperCAmelCase = Memory(a_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
_UpperCAmelCase = None
else:
_UpperCAmelCase = measure_peak_memory_cpu(a_ )
_UpperCAmelCase = Memory(a_ ) if isinstance(a_ , a_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_UpperCAmelCase = stop_memory_tracing(a_ )
if memory is None:
_UpperCAmelCase = summary.total
else:
_UpperCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 657 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# scheduler dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , a_ , a_ ) -> List[Any]:
super().__init__()
self.register_modules(unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self , a_ = 1 , a_ = None , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ) -> Union[ImagePipelineOutput, Tuple]:
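# Start from Gaussian noise and iteratively denoise it over the scheduler's timesteps.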
_UpperCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
_UpperCAmelCase = image.to(self.device )
# set step values
self.scheduler.set_timesteps(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(a_ , a_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(a_ , a_ , a_ ).prev_sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(a_ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=a_ ), "This is a local test"
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 | 1 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
__magic_name__ = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = list(s_dict.keys() )
for key in keys:
_UpperCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ )
print(f"{key} -> {new_key}" )
_UpperCAmelCase = s_dict.pop(UpperCamelCase__ )
return s_dict
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
_UpperCAmelCase = emb.weight.data
return lin_layer
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.basename(UpperCamelCase__ )
_UpperCAmelCase = url.split("/" )[-2]
_UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not os.path.isfile(UpperCamelCase__ ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(UpperCamelCase__ ):
_UpperCAmelCase = open(UpperCamelCase__ , "rb" ).read()
if hashlib.sha256(UpperCamelCase__ ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(UpperCamelCase__ ) as source, open(UpperCamelCase__ , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=UpperCamelCase__ , unit_divisor=1024 ) as loop:
while True:
_UpperCAmelCase = source.read(8192 )
if not buffer:
break
output.write(UpperCamelCase__ )
loop.update(len(UpperCamelCase__ ) )
_UpperCAmelCase = open(UpperCamelCase__ , "rb" ).read()
if hashlib.sha256(UpperCamelCase__ ).hexdigest() != expected_sha256:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
return model_bytes
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if ".pt" not in checkpoint_path:
_UpperCAmelCase = _download(_MODELS[checkpoint_path] )
else:
_UpperCAmelCase = torch.load(UpperCamelCase__ , map_location="cpu" )
_UpperCAmelCase = original_checkpoint["dims"]
_UpperCAmelCase = original_checkpoint["model_state_dict"]
_UpperCAmelCase = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(UpperCamelCase__ )
rename_keys(UpperCamelCase__ )
_UpperCAmelCase = True
_UpperCAmelCase = state_dict["decoder.layers.0.fc1.weight"].shape[0]
_UpperCAmelCase = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=UpperCamelCase__ , decoder_ffn_dim=UpperCamelCase__ , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
_UpperCAmelCase = WhisperForConditionalGeneration(UpperCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0 and not set(UpperCamelCase__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f" but all the following weights are missing {missing}" )
if tie_embeds:
_UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_UpperCAmelCase = proj_out_weights
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__magic_name__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 657 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.int64 , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
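# Beam search over GPT-2 logits, ranking candidates by length-normalized cumulative log-probability until EOS or max length.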
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
__magic_name__ = {
'''google/reformer-crime-and-punishment''': 52_42_88,
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : str = VOCAB_FILES_NAMES
lowercase_ : str = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self , a_ , a_="</s>" , a_="<unk>" , a_=[] , a_ = None , **a_ , ) -> None:
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a_ , unk_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def _a ( self ) -> Any:
return self.sp_model.get_piece_size()
def _a ( self ) -> Dict[str, int]:
_UpperCAmelCase = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , a_ ) -> Dict:
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , a_ ) -> List[str]:
return self.sp_model.encode(a_ , out_type=a_ )
def _a ( self , a_ ) -> Optional[Any]:
return self.sp_model.piece_to_id(a_ )
def _a ( self , a_ ) -> Tuple:
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(a_ )
return token
def _a ( self , a_ ) -> Optional[int]:
_UpperCAmelCase = []
_UpperCAmelCase = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
_UpperCAmelCase = []
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def _a ( self , a_ , a_ = None ) -> Tuple[str]:
if not os.path.isdir(a_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__magic_name__ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''ernie_m'''
lowercase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , a_ = 250002 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = 3072 , a_ = "gelu" , a_ = 0.1 , a_ = 0.1 , a_ = 514 , a_ = 0.02 , a_ = 1 , a_ = 1e-05 , a_=None , a_=False , a_=0.0 , **a_ , ) -> Optional[int]:
super().__init__(pad_token_id=a_ , **a_ )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = classifier_dropout
_UpperCAmelCase = is_decoder
_UpperCAmelCase = act_dropout
| 657 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = SwinvaConfig()
_UpperCAmelCase = swinva_name.split("_" )
_UpperCAmelCase = name_split[1]
if "to" in name_split[3]:
_UpperCAmelCase = int(name_split[3][-3:] )
else:
_UpperCAmelCase = int(name_split[3] )
if "to" in name_split[2]:
_UpperCAmelCase = int(name_split[2][-2:] )
else:
_UpperCAmelCase = int(name_split[2][6:] )
if model_size == "tiny":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 6, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "small":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "base":
_UpperCAmelCase = 128
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (4, 8, 16, 32)
else:
_UpperCAmelCase = 192
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (6, 12, 24, 48)
if "to" in swinva_name:
_UpperCAmelCase = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
_UpperCAmelCase = 2_1841
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = "imagenet-22k-id2label.json"
_UpperCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
else:
_UpperCAmelCase = 1000
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = "imagenet-1k-id2label.json"
_UpperCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = img_size
_UpperCAmelCase = num_classes
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
return config
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if "patch_embed.proj" in name:
_UpperCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_UpperCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
_UpperCAmelCase = "encoder." + name
if "attn.proj" in name:
_UpperCAmelCase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_UpperCAmelCase = name.replace("attn" , "attention.self" )
if "norm1" in name:
_UpperCAmelCase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_UpperCAmelCase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_UpperCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_UpperCAmelCase = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
_UpperCAmelCase = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
_UpperCAmelCase = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
_UpperCAmelCase = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
_UpperCAmelCase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
_UpperCAmelCase = "layernorm.weight"
if name == "norm.bias":
_UpperCAmelCase = "layernorm.bias"
if "head" in name:
_UpperCAmelCase = name.replace("head" , "classifier" )
else:
_UpperCAmelCase = "swinv2." + name
return name
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
if "mask" in key:
continue
elif "qkv" in key:
_UpperCAmelCase = key.split("." )
_UpperCAmelCase = int(key_split[1] )
_UpperCAmelCase = int(key_split[3] )
_UpperCAmelCase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[dim : dim * 2, :]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[:dim]
_UpperCAmelCase = val[
dim : dim * 2
]
_UpperCAmelCase = val[-dim:]
else:
_UpperCAmelCase = val
return orig_state_dict
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ )
timm_model.eval()
_UpperCAmelCase = get_swinva_config(UpperCamelCase__ )
_UpperCAmelCase = SwinvaForImageClassification(UpperCamelCase__ )
model.eval()
_UpperCAmelCase = convert_state_dict(timm_model.state_dict() , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
_UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
_UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors="pt" )
_UpperCAmelCase = timm_model(inputs["pixel_values"] )
_UpperCAmelCase = model(**UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 )
print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase__ )
model.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__magic_name__ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__magic_name__ = random.Random()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=None ):
"""simple docstring"""
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self , a_ , a_=7 , a_=400 , a_=2000 , a_=24 , a_=24 , a_=0.0 , a_=16000 , a_=True , a_=True , ) -> str:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = feature_size
_UpperCAmelCase = num_mel_bins
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
def _a ( self ) -> str:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , a_=False , a_=False ) -> List[Any]:
def _flatten(a_ ):
return list(itertools.chain(*a_ ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : int = SpeechaTextFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = SpeechaTextFeatureExtractionTester(self )
def _a ( self , a_ ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) )
def _a ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(a_ , padding=a_ , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(a_ , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(a_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(a_ )
_UpperCAmelCase = feature_extractor(a_ , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(a_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def _a ( self ) -> str:
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase = [None, 16, None]
for max_length, padding in zip(a_ , a_ ):
_UpperCAmelCase = feature_extractor(
a_ , padding=a_ , max_length=a_ , return_attention_mask=a_ )
_UpperCAmelCase = inputs.input_features
_UpperCAmelCase = inputs.attention_mask
_UpperCAmelCase = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase = [None, 16, None]
for max_length, padding in zip(a_ , a_ ):
_UpperCAmelCase = feature_extractor(
a_ , max_length=a_ , padding=a_ , return_tensors="np" , return_attention_mask=a_ )
_UpperCAmelCase = inputs.input_features
_UpperCAmelCase = inputs.attention_mask
_UpperCAmelCase = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase = feature_extractor(
a_ , padding="max_length" , max_length=4 , truncation=a_ , return_tensors="np" , return_attention_mask=a_ , )
_UpperCAmelCase = inputs.input_features
_UpperCAmelCase = inputs.attention_mask
_UpperCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def _a ( self ) -> int:
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase = feature_extractor(
a_ , padding="longest" , max_length=4 , truncation=a_ , return_tensors="np" , return_attention_mask=a_ , )
_UpperCAmelCase = inputs.input_features
_UpperCAmelCase = inputs.attention_mask
_UpperCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase = feature_extractor(
a_ , padding="longest" , max_length=16 , truncation=a_ , return_tensors="np" , return_attention_mask=a_ , )
_UpperCAmelCase = inputs.input_features
_UpperCAmelCase = inputs.attention_mask
_UpperCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape , (3, 6, 24) )
def _a ( self ) -> int:
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , a_ ) -> List[str]:
from datasets import load_dataset
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _a ( self ) -> Union[str, Any]:
# fmt: off
_UpperCAmelCase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = feature_extractor(a_ , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , a_ , atol=1e-4 ) )
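# A compact restatement of the padding/truncation behavior exercised above,
# as a minimal standalone sketch (hypothetical frame counts, not the tests'
# real fbank values): padding="longest" pads every item to the longest item
# in the batch, while truncation=True caps that target at max_length.
import numpy as np

def _pad_longest_sketch(batch, max_length=None):
    target = max(len(x) for x in batch)
    if max_length is not None:
        target = min(target, max_length)
    return np.stack([np.pad(x[:target], (0, target - len(x[:target]))) for x in batch])

_batch = [np.ones(4), np.ones(6), np.ones(5)]
print(_pad_longest_sketch(_batch, max_length=4).shape)   # (3, 4): truncated to max_length
print(_pad_longest_sketch(_batch, max_length=16).shape)  # (3, 6): padded to the longest item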
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
    _UpperCAmelCase = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        _UpperCAmelCase = len(str(decimal ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
    _UpperCAmelCase , _UpperCAmelCase = numerator // divisor, denominator // divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__magic_name__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ = {
'''google/electra-small-generator''': 5_12,
'''google/electra-base-generator''': 5_12,
'''google/electra-large-generator''': 5_12,
'''google/electra-small-discriminator''': 5_12,
'''google/electra-base-discriminator''': 5_12,
'''google/electra-large-discriminator''': 5_12,
}
__magic_name__ = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : List[Any] = VOCAB_FILES_NAMES
lowercase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : List[str] = PRETRAINED_INIT_CONFIGURATION
lowercase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Any = ElectraTokenizer
def __init__( self , a_=None , a_=None , a_=True , a_="[UNK]" , a_="[SEP]" , a_="[PAD]" , a_="[CLS]" , a_="[MASK]" , a_=True , a_=None , **a_ , ) -> List[Any]:
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , tokenize_chinese_chars=a_ , strip_accents=a_ , **a_ , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , a_ ) != do_lower_case
or normalizer_state.get("strip_accents" , a_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a_ ) != tokenize_chinese_chars
):
_UpperCAmelCase = getattr(a_ , normalizer_state.pop("type" ) )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = strip_accents
_UpperCAmelCase = tokenize_chinese_chars
_UpperCAmelCase = normalizer_class(**a_ )
_UpperCAmelCase = do_lower_case
def _a ( self , a_ , a_=None ) -> int:
_UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self , a_ , a_ = None ) -> List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self , a_ , a_ = None ) -> Tuple[str]:
_UpperCAmelCase = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
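# The special-token layout built above follows the BERT/ELECTRA convention:
# "[CLS] A [SEP]" for a single segment, "[CLS] A [SEP] B [SEP]" for a pair,
# with token type ids 0 over the first block and 1 over the second. Minimal
# standalone sketch with hypothetical ids (cls=101, sep=102 are illustrative only):
def _build_pair_sketch(ids_a, ids_b=None, cls=101, sep=102):
    tokens = [cls] + ids_a + [sep]
    type_ids = [0] * len(tokens)
    if ids_b is not None:
        tokens += ids_b + [sep]
        type_ids += [1] * (len(ids_b) + 1)
    return tokens, type_ids

print(_build_pair_sketch([7, 8], [9]))  # ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])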
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 1 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCamelCase ( *UpperCamelCase__ ):
"""simple docstring"""
with open(UpperCamelCase__ , "r" ) as fh:
fcntl.flock(UpperCamelCase__ , fcntl.LOCK_EX )
try:
print(*UpperCamelCase__ )
finally:
fcntl.flock(UpperCamelCase__ , fcntl.LOCK_UN )
__magic_name__ = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__magic_name__ = torch.device('''cuda''', local_rank)
__magic_name__ = socket.gethostname()
__magic_name__ = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__magic_name__ = dist.get_rank()
__magic_name__ = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
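# Expected healthy output for 2 processes on 1 node (illustrative only; the
# ordering of the rank lines may vary):
#
# [hostname-0] 0 is OK (global rank: 0/2)
# [hostname-1] 1 is OK (global rank: 1/2)
# [hostname-0] pt=<torch version>, cuda=<cuda version>, nccl=<nccl version>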
| 657 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 1 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
__magic_name__ = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
__magic_name__ = requests.get(url, headers={'''UserAgent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
__magic_name__ = BeautifulSoup(res.text, '''html.parser''')
__magic_name__ = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(f'''https://google.com{link.get("href")}''')
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
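# Hedged usage sketch (assumes transformers is installed; UperNetConfig is the
# public name of the config class defined above): the nested backbone config is
# re-serialized inside to_dict(), so a round-trip keeps both model_type keys.
# from transformers import UperNetConfig
# config = UperNetConfig()  # defaults to a ResNet backbone
# assert config.to_dict()["model_type"] == "upernet"
# assert config.to_dict()["backbone_config"]["model_type"] == "resnet"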
| 657 | 1 |
"""simple docstring"""
import qiskit
def __lowerCamelCase ( UpperCamelCase__ = 2 ):
"""simple docstring"""
_UpperCAmelCase = qubits
# Using Aer's simulator
_UpperCAmelCase = qiskit.Aer.get_backend("aer_simulator" )
# Creating a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(UpperCamelCase__ , UpperCamelCase__ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , UpperCamelCase__ ):
# Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(UpperCamelCase__ ) ) , list(range(UpperCamelCase__ ) ) )
    # Measuring any one qubit now collapses the other qubits out of
    # superposition into the same state as the measured one.
# Executing the circuit on the simulator
_UpperCAmelCase = qiskit.execute(UpperCamelCase__ , UpperCamelCase__ , shots=1000 )
return job.result().get_counts(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
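    # Sanity-check sketch for the function above: a 2-qubit run should place
    # all 1000 shots on the two correlated basis states '00' and '11'.
    counts = quantum_entanglement(2)
    assert set(counts) <= {"00", "11"}
    assert sum(counts.values()) == 1000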
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
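# Standalone sketch of the optional-backend pattern above (illustrative, not
# the transformers implementation): a backend's symbols are only registered
# when the corresponding package is importable.
import importlib.util

_sketch_structure = {"configuration_xlm_roberta": ["XLMRobertaConfig"]}
if importlib.util.find_spec("torch") is not None:
    _sketch_structure["modeling_xlm_roberta"] = ["XLMRobertaModel"]
if importlib.util.find_spec("tensorflow") is not None:
    _sketch_structure["modeling_tf_xlm_roberta"] = ["TFXLMRobertaModel"]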
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
        ) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
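        # The 199 in the expected shape follows the sequence-length rule from the
        # tester above: text tokens + (image_size / patch_size)**2 patches + 1 CLS.
        # Assuming the layoutlmv3-base defaults of a 224x224 input with 16x16
        # patches: 2 + (224 // 16) ** 2 + 1 = 2 + 196 + 1 = 199.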
| 657 | 1 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
__magic_name__ = parse(importlib.metadata.version('''torch'''))
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
_UpperCAmelCase = STR_OPERATION_TO_FUNC[operation]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = parse(importlib.metadata.version(UpperCamelCase__ ) )
return operation(UpperCamelCase__ , parse(UpperCamelCase__ ) )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return compare_versions(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
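# Minimal standalone restatement of the pattern above: map an operator string
# to a comparison callable and compare the installed version against the
# required one (reuses the parse/importlib.metadata imports at the top of
# this snippet; the package/version values below are illustrative).
import operator

_ops_sketch = {">=": operator.ge, ">": operator.gt, "==": operator.eq, "<": operator.lt}

def _meets_requirement_sketch(package, op, required):
    return _ops_sketch[op](parse(importlib.metadata.version(package)), parse(required))

# _meets_requirement_sketch("packaging", ">=", "20.0")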
| 657 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
        # We don't actually care about the result; we just want to make sure it
        # works, i.e. that the float16 tensor was cast back to float32 for
        # postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
        # Inputs without a mask_token are not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary, so the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
        # If we use the most probable targets and filter differently, we should
        # still get the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # predictions than there are unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
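# Minimal standalone fill-mask call mirroring what the tests above exercise
# (same tiny checkpoint; each prediction dict carries the keys sequence,
# score, token and token_str). Left commented so the test module has no
# import-time side effects:
# unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
# for pred in unmasker("My name is <mask>"):
#     print(pred["token_str"], round(pred["score"], 6))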
| 657 | 1 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
__magic_name__ = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
__magic_name__ = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
__magic_name__ = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def _a ( self , a_ , a_ , a_=None , a_=1 , a_="binary" , a_=None ) -> Union[str, Any]:
_UpperCAmelCase = fa_score(
a_ , a_ , labels=a_ , pos_label=a_ , average=a_ , sample_weight=a_ )
return {"f1": float(a_ ) if score.size == 1 else score}
| 657 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
        arr = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type , pa.int64() )
    def _a ( self ) -> Optional[int]:
        with self.assertRaises(ValueError ):
            _UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.int64() )
    def _a ( self ) -> int:
        with self.assertRaises(ValueError ):
            _UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
    def _a ( self ) -> Optional[Any]:
        arr = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
        self.assertEqual(arr.type , pa.int32() )
    def _a ( self ) -> int:
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            _UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
    def _a ( self ) -> Dict:
        arr = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
        self.assertEqual(arr.type , pa.int32() )
    def _a ( self ) -> Union[str, Any]:
        arr = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
        self.assertEqual(arr.type , pa.string() )
    def _a ( self ) -> Union[str, Any]:
        arr = pa.array(TypedSequence([[[1, 2, 3]]] , type=Array2D((1, 3) , "int64" ) ) )
        self.assertEqual(arr.type , Array2DExtensionType((1, 3) , "int64" ) )
    def _a ( self ) -> Tuple:
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            _UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Array2D((1, 3) , "int64" ) ) )
    def _a ( self ) -> str:
        arr = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=Array2D((1, 3) , "int64" ) ) )
        self.assertEqual(arr.type , Array2DExtensionType((1, 3) , "int64" ) )
    def _a ( self ) -> Tuple:
        arr = pa.array(TypedSequence(["foo", "bar"] , try_type=Array2D((1, 3) , "int64" ) ) )
        self.assertEqual(arr.type , pa.string() )
    @require_pil
    def _a ( self ) -> List[str]:
        import PIL.Image
        pil_image = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
        with patch(
            "datasets.arrow_writer.cast_to_python_objects" , side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
            _UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
            args , kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting" , kwargs )
            self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output ( output , expected_num_chunks ):
    """simple docstring"""
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields" , [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def __lowerCamelCase ( fields , writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
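# --- Illustration (hypothetical helper, not part of the original tests): the round-trip
# pattern the writer tests exercise — write rows to an in-memory Arrow stream, read back.
def _example_round_trip():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
    return table.to_pydict()  # {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}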
def __lowerCamelCase ( ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
    with ArrowWriter(stream=output , features=features ) as writer:
        writer.write({"labels": 0} )
        writer.write({"labels": 1} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        with pytest.raises(InvalidKeyError ):
            writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
        num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        with pytest.raises(DuplicatedKeysError ):
            writer.write({"col_1": "foo", "col_2": 1} , key=10 )
            writer.write({"col_1": "bar", "col_2": 2} , key=10 )
        num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        writer.write({"col_1": "foo", "col_2": 1} , key=1 )
        writer.write({"col_1": "bar", "col_2": 2} , key=2 )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
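# --- Illustration (simplified sketch, not the real implementation in datasets.keyhash):
# with check_duplicates=True the writer validates and hashes each example's key, which
# is what the three keyed-write tests above exercise.
def _keyed_write_sketch(examples_with_keys):
    seen = set()
    for example, key in examples_with_keys:
        if not isinstance(key, (str, int, bytes)):
            raise TypeError("key must be str, int or bytes")  # the writer raises InvalidKeyError
        if key in seen:
            raise ValueError(f"duplicate key: {key}")  # the writer raises DuplicatedKeysError
        seen.add(key)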
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields" , [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def __lowerCamelCase ( fields , writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
        writer.write_batch({"col_1": [], "col_2": []} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields" , [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def __lowerCamelCase ( fields , writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields" , [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def __lowerCamelCase ( fields , writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir , "test.arrow" )
        with ArrowWriter(path=output , schema=pa.schema(fields ) ) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
            num_examples , num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
        _check_output(output , 1 )
def get_base_dtype ( arr_type ):
    """simple docstring"""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
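# --- Illustration (hypothetical quick check): the helper unwraps nested list types down
# to the primitive dtype, e.g. list<list<int32>> -> int32.
assert get_base_dtype(pa.list_(pa.list_(pa.int32() ) ) ) == pa.int32()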
def change_first_primitive_element_in_list ( lst , value ):
    """simple docstring"""
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype" , [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( sequence , col , expected_dtype ):
    """simple docstring"""
    arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
    assert get_base_dtype(arr.type ) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence )
        value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence , value )
        arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
        assert get_base_dtype(arr.type ) == pa.int64()
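# --- Illustration (hypothetical quick check): OptimizedTypedSequence narrows the integer
# type from the column name, as the parametrization above encodes; mask-like columns fit int8.
assert pa.array(OptimizedTypedSequence([0, 1, 1] , col="attention_mask" ) ).type == pa.int8()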
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( mockfs ):
    """simple docstring"""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path , storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs , type(mockfs ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path )
def __lowerCamelCase ( ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( tmp_path , embed_local_files ):
    """simple docstring"""
    import PIL.Image
    image_path = str(tmp_path / "test_image_rgb.jpg" )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(image_path , format="png" )
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output , features=Features({"image": Image()} ) , embed_local_files=embed_local_files ) as writer:
        writer.write({"image": image_path} )
        writer.finalize()
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"] , str )
        with open(image_path , "rb" ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
    """simple docstring"""
    schema = pa.schema([pa.field("col_1" , pa.string() , nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=schema )
    assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( min_val = 10 , max_val = 1000 , option = True ):
    """simple docstring"""
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
    return min_val if option else max_val
def get_avg ( number_a , number_b ):
    """simple docstring"""
    return int((number_a + number_b) / 2 )
def guess_the_number ( lower , higher , to_guess ):
    """simple docstring"""
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower < higher)" )
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value" )
    def answer(number ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started..." )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}" )
    print(f"details : {last_numbers!s}" )
def main ( ):
    """simple docstring"""
    lower = int(input("Enter lower value : " ).strip() )
    higher = int(input("Enter high value : " ).strip() )
    guess = int(input("Enter value to guess : " ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
| 657 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
    def _a ( self ) -> Optional[Any]:
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names )
        self.assertEqual(out_features , ["c"] )
        self.assertEqual(out_indices , [2] )
        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(["a", "c"] , None , stage_names )
        self.assertEqual(out_features , ["a", "c"] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names )
        self.assertEqual(out_features , ["a", "c"] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names )
        self.assertEqual(out_features , ["a", "c"] )
        self.assertEqual(out_indices , [-3, -1] )
    def _a ( self ) -> Optional[int]:
        # Stage names must be set
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
    def _a ( self ) -> int:
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
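# --- Illustration (hypothetical quick check of the helper under test):
def _example_alignment():
    out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , ["a", "b", "c"] )
    return out_features , out_indices  # (["a", "c"], [0, 2]); with both None it defaults to the last stage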
| 657 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : str = OpenAIGPTTokenizer
lowercase_ : str = OpenAIGPTTokenizerFast
lowercase_ : Any = True
lowercase_ : int = False
def _a ( self ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
_UpperCAmelCase = dict(zip(a_ , range(len(a_ ) ) ) )
_UpperCAmelCase = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(a_ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(a_ ) )
def _a ( self , a_ ) -> List[str]:
return "lower newer", "lower newer"
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_UpperCAmelCase = "lower"
_UpperCAmelCase = ["low", "er</w>"]
_UpperCAmelCase = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokens + ["<unk>"]
_UpperCAmelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def _a ( self , a_=15 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
# Simple input
_UpperCAmelCase = "This is a simple input"
_UpperCAmelCase = ["This is a simple input 1", "This is a simple input 2"]
_UpperCAmelCase = ("This is a simple input", "This is a pair")
_UpperCAmelCase = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="max_length" )
# Simple input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="max_length" )
# Simple input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="max_length" , )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="max_length" )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="max_length" )
# Pair input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="max_length" , )
def _a ( self ) -> Union[str, Any]:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _lowerCAmelCase ( lowerCamelCase ):
pass
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
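# --- Illustration (simplified sketch of the pattern above, not the actual
# transformers._LazyModule): attribute access triggers the real submodule import,
# so heavy frameworks are only loaded when their classes are first used.
import importlib
import types

class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {v: mod for mod, values in import_structure.items() for v in values}
    def __getattr__(self, attr):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)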
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
lowercase_ : Optional[int] = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
lowercase_ : Optional[int] = field(
default=lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowercase_ : Optional[int] = field(
default=lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
lowercase_ : Optional[int] = field(
default=lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class ModelArguments :
lowercase_ : str = field(
default=lowerCamelCase , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase_ : str = field(
default=lowerCamelCase , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
lowercase_ : Optional[str] = field(
default=lowerCamelCase , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
lowercase_ : Optional[str] = field(
default=lowerCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowercase_ : Optional[str] = field(
default=lowerCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowercase_ : Optional[str] = field(
default=lowerCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowercase_ : Optional[bool] = field(
default=lowerCamelCase , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
lowercase_ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def __lowerCamelCase ( ):
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_UpperCAmelCase = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_UpperCAmelCase = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = train_dataset.features["label"].names
if training_args.do_eval:
_UpperCAmelCase = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = eval_dataset.features["label"].names
if training_args.do_predict:
_UpperCAmelCase = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = predict_dataset.features["label"].names
# Labels
_UpperCAmelCase = len(UpperCamelCase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , idalabel={str(UpperCamelCase__ ): label for i, label in enumerate(UpperCamelCase__ )} , labelaid={label: i for i, label in enumerate(UpperCamelCase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase = False
def preprocess_function(UpperCamelCase__ ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=UpperCamelCase__ , max_length=data_args.max_seq_length , truncation=UpperCamelCase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_UpperCAmelCase = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(UpperCamelCase__ ) ) , 3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_UpperCAmelCase = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_predict_samples )
_UpperCAmelCase = predict_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
_UpperCAmelCase = predict_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
_UpperCAmelCase = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase__ ):
_UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , UpperCamelCase__ ) else p.predictions
_UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
return metric.compute(predictions=UpperCamelCase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase = default_data_collator
    elif training_args.fp16:
_UpperCAmelCase = DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 )
else:
_UpperCAmelCase = None
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCamelCase__ , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
_UpperCAmelCase = train_result.metrics
_UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase = trainer.evaluate(eval_dataset=UpperCamelCase__ )
_UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(UpperCamelCase__ , metric_key_prefix="predict" )
_UpperCAmelCase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCamelCase__ )
)
_UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("predict" , UpperCamelCase__ )
trainer.save_metrics("predict" , UpperCamelCase__ )
_UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
_UpperCAmelCase = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(UpperCamelCase__ ):
_UpperCAmelCase = label_list[item]
writer.write(f"{index}\t{item}\n" )
if __name__ == "__main__":
main()
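# --- Example invocation (illustrative; the flag names come from the dataclasses above,
# and the checkpoint name is just a common multilingual choice for XNLI):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --output_dir /tmp/debug_xnli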
| 657 |
"""simple docstring"""
def __lowerCamelCase ( number ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
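# --- Note (illustrative): the check above is the "automorphic number" property — a number
# whose square ends in the number itself, compared digit by digit from the right.
assert __lowerCamelCase(5 )   # 5**2 = 25 ends in 5
assert __lowerCamelCase(76 )  # 76**2 = 5776 ends in 76
assert not __lowerCamelCase(7 )  # 7**2 = 49 does not end in 7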
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
lowercase_ : str
lowercase_ : List[str]
lowercase_ : Optional[List[str]]
@dataclass
class _lowerCAmelCase :
lowercase_ : List[int]
lowercase_ : List[int]
lowercase_ : Optional[List[int]] = None
lowercase_ : Optional[List[int]] = None
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : str = '''train'''
lowercase_ : Optional[int] = '''dev'''
lowercase_ : Union[str, Any] = '''test'''
class _lowerCAmelCase :
@staticmethod
def _a ( a_ , a_ ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def _a ( a_ ) -> List[str]:
raise NotImplementedError
@staticmethod
def _a ( a_ , a_ , a_ , a_ , a_=False , a_="[CLS]" , a_=1 , a_="[SEP]" , a_=False , a_=False , a_=0 , a_=0 , a_=-100 , a_=0 , a_=True , ) -> List[InputFeatures]:
_UpperCAmelCase = {label: i for i, label in enumerate(a_ )}
_UpperCAmelCase = []
for ex_index, example in enumerate(a_ ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" , a_ , len(a_ ) )
_UpperCAmelCase = []
_UpperCAmelCase = []
for word, label in zip(example.words , example.labels ):
_UpperCAmelCase = tokenizer.tokenize(a_ )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(a_ ) > 0:
tokens.extend(a_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(a_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_UpperCAmelCase = tokenizer.num_special_tokens_to_add()
if len(a_ ) > max_seq_length - special_tokens_count:
_UpperCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
_UpperCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_UpperCAmelCase = [sequence_a_segment_id] * len(a_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_UpperCAmelCase = [cls_token] + tokens
_UpperCAmelCase = [pad_token_label_id] + label_ids
_UpperCAmelCase = [cls_token_segment_id] + segment_ids
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(a_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_UpperCAmelCase = [1 if mask_padding_with_zero else 0] * len(a_ )
# Zero-pad up to the sequence length.
_UpperCAmelCase = max_seq_length - len(a_ )
if pad_on_left:
_UpperCAmelCase = ([pad_token] * padding_length) + input_ids
_UpperCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_UpperCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
_UpperCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(a_ ) == max_seq_length
assert len(a_ ) == max_seq_length
assert len(a_ ) == max_seq_length
assert len(a_ ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(a_ ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(a_ ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(a_ ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(a_ ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(a_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCAmelCase = None
features.append(
InputFeatures(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , label_ids=a_ ) )
return features
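# --- Illustration (simplified sketch of the label-alignment rule implemented above):
# only a word's first sub-token keeps the real label id; continuation sub-tokens get
# pad_token_label_id (ignored by the loss, e.g. -100 for CrossEntropyLoss).
def _align_labels_sketch(word_tokens, label_id, pad_token_label_id=-100):
    return [label_id] + [pad_token_label_id] * (len(word_tokens) - 1)

# e.g. a word split into ["jack", "##son", "##ville"] with label id 5 -> [5, -100, -100]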
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : List[InputFeatures]
lowercase_ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self , a_ , a_ , a_ , a_ , a_ , a_ = None , a_=False , a_ = Split.train , ) -> Optional[int]:
# Load data features from cache or dataset file
_UpperCAmelCase = os.path.join(
a_ , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(a_ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase = cached_features_file + ".lock"
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
_UpperCAmelCase = torch.load(a_ )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
_UpperCAmelCase = token_classification_task.read_examples_from_file(a_ , a_ )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCAmelCase = token_classification_task.convert_examples_to_features(
a_ , a_ , a_ , a_ , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=a_ , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"Saving features into cached file {cached_features_file}" )
torch.save(self.features , a_ )
def __len__( self ) -> Optional[int]:
return len(self.features )
def __getitem__( self , a_ ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class _lowerCAmelCase :
lowercase_ : List[InputFeatures]
lowercase_ : int = -100
def __init__( self , a_ , a_ , a_ , a_ , a_ , a_ = None , a_=False , a_ = Split.train , ) -> List[str]:
_UpperCAmelCase = token_classification_task.read_examples_from_file(a_ , a_ )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCAmelCase = token_classification_task.convert_examples_to_features(
a_ , a_ , a_ , a_ , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=a_ , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64) , (
                    {"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
else:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64) , (
                    {
                        "input_ids": tf.TensorShape([None] ),
                        "attention_mask": tf.TensorShape([None] ),
                        "token_type_ids": tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )
    def _a ( self ) -> int:
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset
def __len__( self ) -> List[str]:
return len(self.features )
def __getitem__( self , a_ ) -> InputFeatures:
return self.features[i]
| 657 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
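# Illustrative usage sketch (the checkpoint name and image path below are
# assumptions, not taken from this file):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("cats.png", threshold=0.9)
#   # -> [{"score": 0.99, "label": "cat",
#   #      "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]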
| 657 | 1 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "all_results.json" )
if os.path.exists(UpperCamelCase__ ):
with open(UpperCamelCase__ , "r" ) as f:
_UpperCAmelCase = json.load(UpperCamelCase__ )
else:
raise ValueError(f"can't find {path}" )
return results
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> str:
import xla_spawn
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(a_ , "argv" , a_ ):
_UpperCAmelCase = time()
xla_spawn.main()
_UpperCAmelCase = time()
_UpperCAmelCase = get_results(a_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def _a ( self ) -> List[str]:
import xla_spawn
_UpperCAmelCase = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(a_ , "argv" , a_ ):
xla_spawn.main()
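# For reference, outside the patched-argv test harness the second case above
# roughly corresponds to (paths are assumptions based on the argv list):
#   python xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py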
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
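# Example (assumed behavior of the sort above):
#   merge_sort([5, 2, 9, 1]) -> [1, 2, 5, 9]
#   merge_sort([]) -> []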
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : str = XLNetTokenizer
lowercase_ : Optional[int] = XLNetTokenizerFast
lowercase_ : Tuple = True
lowercase_ : Optional[Any] = True
def _a ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = XLNetTokenizer(a_ , keep_accents=a_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self ) -> int:
_UpperCAmelCase = "<s>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(a_ ) , 1006 )
def _a ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _a ( self ) -> Tuple:
_UpperCAmelCase = XLNetTokenizer(a_ , keep_accents=a_ )
_UpperCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [285, 46, 10, 170, 382] )
_UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
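# Note: in the test fixture, pieces absent from the vocab map to id 0 (the
# unknown token), which is why "9" and "é" round-trip to "<unk>" above.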
def _a ( self ) -> Tuple:
_UpperCAmelCase = XLNetTokenizer(a_ , do_lower_case=a_ )
_UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def _a ( self ) -> List[str]:
_UpperCAmelCase = XLNetTokenizer(a_ , do_lower_case=a_ )
_UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def _a ( self ) -> int:
_UpperCAmelCase = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
_UpperCAmelCase = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
_UpperCAmelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(a_ )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _a ( self ) -> str:
# fmt: off
_UpperCAmelCase = {"input_ids": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 657 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 1 |
"""simple docstring"""
import operator as op
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = lambda UpperCamelCase__ , UpperCamelCase__ : int(x / y ) # noqa: E731 integer division operation
_UpperCAmelCase = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(UpperCamelCase__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(UpperCamelCase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(UpperCamelCase__ ) , sep=" | " )
else:
_UpperCAmelCase = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(UpperCamelCase__ ) , sep=" | " )
_UpperCAmelCase = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(UpperCamelCase__ ) , sep=" | " )
stack.append(
str(opr[x](int(UpperCamelCase__ ) , int(UpperCamelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(UpperCamelCase__ ) , sep=" | " , )
return int(stack[0] )
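# Worked example (assumed I/O): for the postfix input "5 6 9 * +" the trace
# pushes 5, 6 and 9, reduces 6 * 9 -> 54, then 5 + 54 -> 59.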
if __name__ == "__main__":
__magic_name__ = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 657 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
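# Both helpers return the learning-rate trace; the second additionally
# round-trips the scheduler through torch.save/torch.load halfway through, so
# the tests below also verify that schedules survive checkpointing.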
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 | 1 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = multiprocessing.Manager()
_UpperCAmelCase = manager.list()
_UpperCAmelCase = multiprocessing.Process(target=UpperCamelCase__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("timed out" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_UpperCAmelCase = shutil.rmtree
_UpperCAmelCase = os.rmdir
_UpperCAmelCase = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_UpperCAmelCase = {}
with swallow_io():
with time_limit(UpperCamelCase__ ):
exec(UpperCamelCase__ , UpperCamelCase__ )
result.append("passed" )
except TimeoutException:
result.append("timed out" )
except BaseException as e:
result.append(f"failed: {e}" )
# Needed for cleaning up.
_UpperCAmelCase = rmtree
_UpperCAmelCase = rmdir
_UpperCAmelCase = chdir
@contextlib.contextmanager
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def signal_handler(UpperCamelCase__ , UpperCamelCase__ ):
raise TimeoutException("Timed out!" )
signal.setitimer(signal.ITIMER_REAL , UpperCamelCase__ )
signal.signal(signal.SIGALRM , UpperCamelCase__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
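# Usage sketch (assumption: TimeoutException is the empty exception subclass
# defined further below):
#
#   with time_limit(3.0):
#       exec(untrusted_program, {})  # raises TimeoutException after ~3 s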
@contextlib.contextmanager
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = WriteOnlyStringIO()
with contextlib.redirect_stdout(UpperCamelCase__ ):
with contextlib.redirect_stderr(UpperCamelCase__ ):
with redirect_stdin(UpperCamelCase__ ):
yield
@contextlib.contextmanager
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as dirname:
with chdir(UpperCamelCase__ ):
yield dirname
class _lowerCAmelCase ( lowerCamelCase ):
pass
class _lowerCAmelCase ( io.StringIO ):
def _a ( self , *a_ , **a_ ) -> List[Any]:
raise OSError
def _a ( self , *a_ , **a_ ) -> str:
raise OSError
def _a ( self , *a_ , **a_ ) -> Tuple:
raise OSError
def _a ( self , *a_ , **a_ ) -> List[Any]:
return False
class _lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore
lowercase_ : Tuple = '''stdin'''
@contextlib.contextmanager
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if root == ".":
yield
return
_UpperCAmelCase = os.getcwd()
os.chdir(UpperCamelCase__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
_UpperCAmelCase = None
_UpperCAmelCase = None
import os
_UpperCAmelCase = "1"
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
import shutil
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
import subprocess
_UpperCAmelCase = None # type: ignore
_UpperCAmelCase = None
import sys
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
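# Equivalent CLI (assumption, based on how this subparser is registered):
#   accelerate test --config_file path/to/default_config.yaml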
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCamelCase__ , int(b / 2 ) ) * actual_power(UpperCamelCase__ , int(b / 2 ) )
else:
return a * actual_power(UpperCamelCase__ , int(b / 2 ) ) * actual_power(UpperCamelCase__ , int(b / 2 ) )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if b < 0:
# int(b / 2) truncates toward zero, so actual_power also terminates for
# negative exponents; taking the reciprocal then yields a ** b.
return 1 / actual_power(UpperCamelCase__ , UpperCamelCase__ )
return actual_power(UpperCamelCase__ , UpperCamelCase__ )
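# Examples (assumed behavior):
#   power(2, 3)   -> 8
#   power(-2, -3) -> -0.125  (i.e. 1 / (-2) ** 3)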
if __name__ == "__main__":
print(power(-2, -3))
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong interval: f(a) and f(b) must have opposite signs." )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide which half-interval keeps the sign change
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c # f(a) and f(c) differ in sign: the root is in [a, c], so move b
else:
_UpperCAmelCase = c # otherwise the root is in [c, b], so move a
return c
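# Example: with equation(x) = 10 - x * x, bisection(-2, 5) converges to the
# positive root sqrt(10) ~= 3.162 within the 0.01 stopping tolerance.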
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 | 1 |
"""simple docstring"""
from math import factorial
__magic_name__ = {str(d): factorial(d) for d in range(10)}
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(UpperCamelCase__ ) )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , UpperCamelCase__ ) if sum_of_digit_factorial(UpperCamelCase__ ) == i )
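# Why the bound 7 * factorial(9) + 1 suffices (Project Euler 34): an n-digit
# number is at least 10 ** (n - 1), while its digit-factorial sum is at most
# n * 9!; for n >= 8, n * 9! < 10 ** (n - 1), so no larger candidates exist.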
if __name__ == "__main__":
print(f'''{solution() = }''')
| 657 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.int64 , device=a_ )
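# The dummy tokens pad the label sequence on the left so that labels line up
# with the prefix embeddings prepended to the text embeddings in the forward
# pass above.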
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
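# Lazy-module pattern: _import_structure only lists symbol names; the
# _LazyModule assignment at the bottom of this file defers the heavy imports
# until an attribute is first accessed.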
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
_UpperCAmelCase = TOKENIZER_CLASSES
else:
_UpperCAmelCase = {tokenizer_name: getattr(UpperCamelCase__ , tokenizer_name + "Fast" )}
logger.info(f"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
_UpperCAmelCase = TOKENIZER_CLASSES[tokenizer_name]
_UpperCAmelCase = True
if checkpoint_name is None:
_UpperCAmelCase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_UpperCAmelCase = [checkpoint_name]
logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
_UpperCAmelCase = tokenizer_class.from_pretrained(UpperCamelCase__ , force_download=UpperCamelCase__ )
# Save fast tokenizer
logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
_UpperCAmelCase , _UpperCAmelCase = checkpoint.split("/" )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
elif add_prefix:
_UpperCAmelCase = checkpoint
_UpperCAmelCase = dump_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = dump_path
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_UpperCAmelCase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_UpperCAmelCase = file_path.split(UpperCamelCase__ )[-1][0]
if next_char == "/":
_UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = None
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
_UpperCAmelCase = tokenizer.save_pretrained(
UpperCamelCase__ , legacy_format=UpperCamelCase__ , filename_prefix=UpperCamelCase__ )
logger.info(f"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(UpperCamelCase__ )
logger.info(f"=> removing {file_name}" )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
__magic_name__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
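# Example invocation (assumption: run from a transformers checkout, where this
# script lives as convert_slow_tokenizers_checkpoints_to_fast.py):
#   python -m transformers.convert_slow_tokenizers_checkpoints_to_fast \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers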
| 657 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , a_=None , a_=None , *a_ , **a_ ) -> int:
super().__init__(*a_ , **a_ )
if config is None:
assert isinstance(self.model , a_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , a_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
" padding.." )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def _a ( self , a_ ) -> Tuple:
if self.optimizer is None:
_UpperCAmelCase = ["bias", "LayerNorm.weight"]
_UpperCAmelCase = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"scale_parameter": False, "relative_step": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=a_ , optim=a_ , **a_ , )
else:
_UpperCAmelCase = optimizer_cls(a_ , **a_ )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(a_ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _a ( self , a_ ) -> List[Any]:
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=a_ )
return scheduler
def _a ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _a ( self , a_ , a_ , a_ ) -> List[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**a_ , use_cache=a_ )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase = model(**a_ , labels=a_ , use_cache=a_ )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**a_ , use_cache=a_ )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(a_ , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(a_ , a_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _a ( self , a_ , a_ ) -> int:
_UpperCAmelCase = inputs.pop("labels" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(a_ , a_ , a_ )
return loss
def _a ( self , a_ , a_ , a_ , a_ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_UpperCAmelCase = self._prepare_inputs(a_ )
_UpperCAmelCase = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **a_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(a_ , gen_kwargs["max_length"] )
_UpperCAmelCase = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(a_ , a_ , a_ )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(a_ , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _a ( self , a_ , a_ ) -> Dict:
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f" padded to `max_length`={max_length}" )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
| 657 |
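The trainer above imports `label_smoothed_nll_loss` from a local `utils` module without showing it. For orientation, here is a minimal sketch of the fairseq-style computation such a helper typically performs; the name, signature, and sum reduction are assumptions, not the actual module. It also assumes `ignore_index` is a valid vocabulary id (the trainer passes `pad_token_id`), so the `gather` is safe before masking.

```python
# Hypothetical sketch of a fairseq-style label-smoothed NLL loss, NOT the
# actual `utils.label_smoothed_nll_loss` imported above. Assumes `lprobs`
# is (N, vocab) log-probabilities and `target` is (N,) token ids, with
# `ignore_index` a valid vocabulary id (e.g. pad_token_id).
import torch

def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index):
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)    # true-class term
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)    # uniform-prior term
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss
```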
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
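A round-trip of the two Base16 helpers above, restated with readable names purely for illustration:

```python
# Readable restatement of the Base16 encode/decode pair above, for a
# quick round-trip check. Names are illustrative only.
def base16_encode(data: bytes) -> str:
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)

def base16_decode(data: str) -> bytes:
    if len(data) % 2 != 0:
        raise ValueError("Data does not have an even number of hex digits.")
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError("Data is not uppercase hex.")
    return bytes(int(data[i : i + 2], 16) for i in range(0, len(data), 2))

assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"
```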
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
__magic_name__ = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
__magic_name__ = {'''vinai/bartpho-syllable''': 10_24}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , a_ , a_ , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_ = None , **a_ , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
_UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = monolingual_vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_UpperCAmelCase = {}
_UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = cnt
cnt += 1
with open(a_ , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
_UpperCAmelCase = line.strip().split()[0]
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(a_ ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a_ ) -> Optional[int]:
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _a ( self , a_ , a_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , a_ , a_ = None , a_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]
def _a ( self , a_ , a_ = None ) -> List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self ) -> Dict:
return len(self.fairseq_ids_to_tokens )
def _a ( self ) -> Any:
        _UpperCAmelCase = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , a_ ) -> List[str]:
        return self.sp_model.encode(a_ , out_type=str )
def _a ( self , a_ ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _a ( self , a_ ) -> Any:
return self.fairseq_ids_to_tokens[index]
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def _a ( self , a_ , a_ = None ) -> Tuple[str]:
if not os.path.isdir(a_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(a_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
a_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , a_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(a_ , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(a_ )} \n" )
return out_vocab_file, out_monolingual_vocab_file
| 657 |
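The `build_inputs_with_special_tokens` / token-type methods above follow the RoBERTa/mBART convention for sequence pairs, `<s> A </s></s> B </s>`. A small pure-Python sketch with made-up ids makes the layout visible; the ids are illustrative assumptions, not the tokenizer's real vocabulary:

```python
# Sketch of the special-token layout built above, with made-up ids
# (cls=0, sep=2) to show the <s> A </s></s> B </s> pair pattern.
CLS, SEP = 0, 2

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP, SEP] + ids_b + [SEP]

assert build_inputs([5, 6]) == [0, 5, 6, 2]
assert build_inputs([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]
```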
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
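The conversion above reduces the numerator and denominator by their GCD, found with the Euclidean algorithm. A quick cross-check against the stdlib, which performs the same reduction:

```python
# Cross-check sketch: the stdlib Fraction reduces a decimal string to
# lowest terms using the same Euclidean-GCD idea as the function above.
from fractions import Fraction
from math import gcd

def to_fraction(decimal: float) -> tuple:
    frac = Fraction(str(decimal))
    return frac.numerator, frac.denominator

assert to_fraction(1.5) == (3, 2)
assert to_fraction(6.25) == (25, 4)
assert gcd(*to_fraction(0.3)) == 1  # always in lowest terms
```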
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Dict = VQModel
lowercase_ : int = '''sample'''
@property
def _a ( self , a_=(32, 32) ) -> Union[str, Any]:
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
return {"sample": image}
@property
def _a ( self ) -> Tuple:
return (3, 32, 32)
@property
def _a ( self ) -> Tuple:
return (3, 32, 32)
def _a ( self ) -> List[str]:
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def _a ( self ) -> int:
pass
def _a ( self ) -> Union[str, Any]:
pass
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(a_ )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(a_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(a_ )
with torch.no_grad():
_UpperCAmelCase = model(a_ ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : Dict = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase_ : Any = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self , a_ , a_ , a_ ) -> Any:
_UpperCAmelCase = TextaTextGenerationPipeline(model=a_ , tokenizer=a_ )
return generator, ["Something to write", "Something else"]
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = generator("Something there" )
self.assertEqual(a_ , [{"generated_text": ANY(a_ )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
_UpperCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a_ )
self.assertEqual(
a_ , [
[{"generated_text": ANY(a_ )}, {"generated_text": ANY(a_ )}],
[{"generated_text": ANY(a_ )}, {"generated_text": ANY(a_ )}],
] , )
_UpperCAmelCase = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a_ )
self.assertEqual(
a_ , [
[{"generated_text": ANY(a_ )}, {"generated_text": ANY(a_ )}],
[{"generated_text": ANY(a_ )}, {"generated_text": ANY(a_ )}],
] , )
with self.assertRaises(a_ ):
generator(4 )
@require_torch
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator("Something there" , do_sample=a_ )
self.assertEqual(a_ , [{"generated_text": ""}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
"Something there" , num_return_sequences=a_ , num_beams=a_ , )
_UpperCAmelCase = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(a_ , a_ )
_UpperCAmelCase = generator("This is a test" , do_sample=a_ , num_return_sequences=2 , return_tensors=a_ )
self.assertEqual(
a_ , [
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = generator(
["This is a test", "This is a second test"] , do_sample=a_ , num_return_sequences=2 , batch_size=2 , return_tensors=a_ , )
self.assertEqual(
a_ , [
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
] , )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator("Something there" , do_sample=a_ )
self.assertEqual(a_ , [{"generated_text": ""}] )
| 657 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 1 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(UpperCamelCase__ )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = TensorFlowBenchmark(args=UpperCamelCase__ )
try:
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_UpperCAmelCase = "Arg --no_{0} is no longer used, please use --no-{0} instead."
_UpperCAmelCase = " ".join(str(UpperCamelCase__ ).split(" " )[:-1] )
_UpperCAmelCase = ""
_UpperCAmelCase = eval(str(UpperCamelCase__ ).split(" " )[-1] )
_UpperCAmelCase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
_UpperCAmelCase = full_error_msg + begin_error_msg + str(UpperCamelCase__ )
raise ValueError(UpperCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ): # noqa: E741
"""simple docstring"""
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] * n
_UpperCAmelCase = [False] * n
_UpperCAmelCase = [False] * n
def dfs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if parent == root:
out_edge_count += 1
_UpperCAmelCase = True
_UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_UpperCAmelCase = dfs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
_UpperCAmelCase = True
else:
_UpperCAmelCase = min(low[at] , UpperCamelCase__ )
return out_edge_count
for i in range(UpperCamelCase__ ):
if not visited[i]:
_UpperCAmelCase = 0
_UpperCAmelCase = dfs(UpperCamelCase__ , UpperCamelCase__ , -1 , UpperCamelCase__ )
_UpperCAmelCase = out_edge_count > 1
for x in range(len(UpperCamelCase__ ) ):
if is_art[x] is True:
            print(x )
# Adjacency list of graph
__magic_name__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 657 |
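Low-link bookkeeping is easy to get wrong, so a brute-force cross-check is a handy test: a vertex is an articulation point iff removing it increases the number of connected components. The sketch below is an assumption-free restatement of that definition, intended only for validating the DFS version on small graphs like `data` above:

```python
# Brute-force cross-check sketch, O(V * (V + E)): remove each vertex in
# turn and count connected components. Meant for validating the low-link
# implementation above on small graphs, not for production use.
def count_components(adj, removed=None):
    seen, count = set(), 0
    for start in adj:
        if start == removed or start in seen:
            continue
        count += 1
        stack = [start]
        while stack:
            node = stack.pop()
            if node in seen or node == removed:
                continue
            seen.add(node)
            stack.extend(adj[node])
    return count

def brute_force_articulation_points(adj):
    base = count_components(adj)
    return [v for v in adj if count_components(adj, removed=v) > base]

# For the `data` graph above this should yield [2, 3, 5].
```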
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |
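`_LazyModule` defers the real import until an attribute is first accessed. The same effect can be sketched in plain Python with a module-level `__getattr__` (PEP 562); this is an illustration of the pattern, not the actual transformers implementation:

```python
# Illustration of the lazy-import idea behind `_LazyModule`, using a
# module-level __getattr__ (PEP 562). Dropped into a package's
# __init__.py, the submodule is imported only on first attribute access.
# Sketch only, not the transformers code.
import importlib

_IMPORT_MAP = {"TapexTokenizer": ".tokenization_tapex"}

def __getattr__(name):
    if name in _IMPORT_MAP:
        module = importlib.import_module(_IMPORT_MAP[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```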
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
__magic_name__ = list[list[float | int]]
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
_UpperCAmelCase = matrix[row][col]
_UpperCAmelCase = vector[row][0]
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__ , UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , UpperCamelCase__ ):
_UpperCAmelCase = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
_UpperCAmelCase = augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCamelCase__ )
]
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = [[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
_UpperCAmelCase = [[0] for _ in range(UpperCamelCase__ )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
_UpperCAmelCase = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase = y_val
_UpperCAmelCase = solve(UpperCamelCase__ , UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowerCamelCase ( UpperCamelCase__ = question_function , UpperCamelCase__ = 10 ):
"""simple docstring"""
_UpperCAmelCase = [func(UpperCamelCase__ ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase = 0
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for poly in polynomials:
_UpperCAmelCase = 1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 657 |
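The first routine above is plain Gauss-Jordan elimination with partial pivoting; its call site inside the interpolation helper binds it to the name `solve`. Assuming that binding, a tiny usage check:

```python
# Sanity-check sketch for the Gauss-Jordan routine above, assuming it is
# bound to the name `solve` as its call site suggests:
#   x + 2y = 5
#   3x + 4y = 11   has the unique solution x = 1, y = 2.
matrix = [[1.0, 2.0], [3.0, 4.0]]
vector = [[5.0], [11.0]]
assert solve(matrix, vector) == [[1.0], [2.0]]
```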
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCamelCase__ , n - 1 , UpperCamelCase__ ) * a) % mod
else:
        _UpperCAmelCase = binary_exponentiation(UpperCamelCase__ , n // 2 , UpperCamelCase__ )
return (b * b) % mod
# a prime number
__magic_name__ = 7_01
__magic_name__ = 10_00_00_00_00
__magic_name__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 657 |
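The snippet uses Fermat's little theorem: for prime p and b not divisible by p, b^(p-2) ≡ b^(-1) (mod p). Python's built-in three-argument `pow` performs the same fast modular exponentiation, and since Python 3.8 accepts a negative exponent to compute the inverse directly:

```python
# Equivalence sketch: three-argument pow() is the built-in fast modular
# exponentiation, and pow(b, -1, p) (Python 3.8+) returns the modular
# inverse directly, matching the Fermat-inverse trick above. Calls the
# recursive helper under its call-site name `binary_exponentiation`.
p, a, b = 701, 1_000_000_000, 10

assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p) == pow(b, -1, p)
# Since b divides a here, modular division agrees with exact division:
assert (a * pow(b, -1, p)) % p == (a // b) % p
```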
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # results than there are unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
| 657 | 1 |
"""simple docstring"""
class _lowerCAmelCase :
def __init__( self ) -> Any:
_UpperCAmelCase = {}
def _a ( self ) -> None:
print(self.vertex )
for i in self.vertex:
print(a_ , " -> " , " -> ".join([str(a_ ) for j in self.vertex[i]] ) )
def _a ( self , a_ , a_ ) -> None:
        # check if the source vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(a_ )
else:
# else make a new vertex
_UpperCAmelCase = [to_vertex]
def _a ( self ) -> None:
# visited array for storing already visited nodes
_UpperCAmelCase = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(a_ , a_ )
def _a ( self , a_ , a_ ) -> None:
# mark start vertex as visited
_UpperCAmelCase = True
print(a_ , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(a_ , a_ )
if __name__ == "__main__":
__magic_name__ = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 657 |
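Recursive DFS like the class above can exhaust Python's default recursion limit (roughly 1000 frames) on deep graphs. An explicit-stack variant avoids that; the sketch below is an illustrative alternative, not part of the class:

```python
# Iterative DFS sketch with an explicit stack, useful when the graph is
# deep enough to exhaust Python's default recursion limit. Neighbors are
# pushed in reverse so the visit order matches a recursive left-to-right
# traversal.
def dfs_iterative(vertex, start):
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        stack.extend(reversed(vertex.get(node, [])))
    return order

# For the graph above: dfs_iterative(g.vertex, 0) -> [0, 1, 2, 3]
```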
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# values outside the optimized dtype's range should fall back to int64
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
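# A short sketch of the `type` vs `try_type` contract these tests exercise,
# assuming `datasets` and `pyarrow` are installed: `type` is a hard cast that
# raises on mismatch, while `try_type` silently falls back to the inferred type.
import pyarrow as pa
from datasets.arrow_writer import TypedSequence
from datasets.features import Value
arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
assert arr.type == pa.string()  # fallback: strings can't become int64
try:
    pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))
except (TypeError, pa.lib.ArrowInvalid):
    print("hard cast failed, as expected")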
| 657 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase ( lowerCamelCase ):
@staticmethod
@abstractmethod
def _a ( a_ ) -> str:
raise NotImplementedError()
@abstractmethod
def _a ( self ) -> Tuple:
raise NotImplementedError()
| 657 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
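# A hedged re-implementation of the alignment semantics the tests above pin
# down; the real helper lives in transformers.utils.backbone_utils, and this
# sketch only mirrors the documented behavior.
def align_features_indices(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        # default to the last stage
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_indices is None:
        return out_features, [stage_names.index(f) for f in out_features]
    if out_features is None:
        # negative indices are kept as-is but resolve to stage names
        return [stage_names[i] for i in out_indices], list(out_indices)
    return out_features, list(out_indices)
assert align_features_indices(None, None, ["a", "b", "c"]) == (["c"], [2])
assert align_features_indices(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])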
| 657 | 1 |
"""simple docstring"""
from functools import lru_cache
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = 2
_UpperCAmelCase = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(UpperCamelCase__ )
if n > 1:
factors.add(UpperCamelCase__ )
return factors
@lru_cache
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return len(unique_prime_factors(UpperCamelCase__ ) )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return len(set(UpperCamelCase__ ) ) in (0, 1)
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = 2
while True:
# Build the group of n consecutive integers starting at base
_UpperCAmelCase = [base + i for i in range(UpperCamelCase__ )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
_UpperCAmelCase = [upf_len(UpperCamelCase__ ) for x in group]
checker.append(UpperCamelCase__ )
# If all numbers in the list are equal, return the group variable.
if equality(UpperCamelCase__ ):
return group
# Increment our base variable by 1
base += 1
def __lowerCamelCase ( UpperCamelCase__ = 4 ):
"""simple docstring"""
_UpperCAmelCase = run(UpperCamelCase__ )
return results[0] if len(UpperCamelCase__ ) else None
if __name__ == "__main__":
print(solution())
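# A cleaned-up sketch of the same search (Project Euler 47): find the first
# run of n consecutive integers that each have n distinct prime factors.
from functools import lru_cache
@lru_cache(maxsize=None)
def distinct_prime_factor_count(n):
    count, p = 0, 2
    while p * p <= n:
        if n % p == 0:
            count += 1
            while n % p == 0:
                n //= p
        p += 1
    return count + (1 if n > 1 else 0)
def first_consecutive_run(n=4):
    base = 2
    while True:
        if all(distinct_prime_factor_count(base + i) == n for i in range(n)):
            return base
        base += 1
print(first_consecutive_run(2))  # 14 (14 = 2*7, 15 = 3*5)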
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
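# The block above follows the transformers lazy-import convention: heavy
# submodules are only imported when an attribute is first accessed. A minimal
# sketch of that idea (simplified; the real _LazyModule subclasses ModuleType):
import importlib
class LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self._name} has no attribute {attr}")
        module = importlib.import_module(f"{self._name}.{self._attr_to_module[attr]}")
        return getattr(module, attr)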
| 657 | 1 |
"""simple docstring"""
from math import factorial
def __lowerCamelCase ( UpperCamelCase__ = 100 ):
"""simple docstring"""
return sum(int(UpperCamelCase__ ) for x in str(factorial(UpperCamelCase__ ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
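# The helper above, written out plainly (Project Euler 20: sum the digits
# of n!); a tiny illustrative rewrite, not the file's exact code.
from math import factorial
def factorial_digit_sum(n=100):
    return sum(int(digit) for digit in str(factorial(n)))
assert factorial_digit_sum(10) == 27  # 10! = 3628800 -> 3+6+2+8+8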
| 657 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''MobileViTFeatureExtractor''']
__magic_name__ = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
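# A commented sketch of the digit-by-digit check above: a number is
# automorphic when its square ends in the number itself (e.g. 76^2 = 5776).
def is_automorphic(number: int) -> bool:
    if number < 0:
        return False
    square = number * number
    while number > 0:
        # compare the trailing digits of the number and its square
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True
assert is_automorphic(76) and not is_automorphic(7)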
| 657 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase ):
lowercase_ : Any = '''resnet'''
lowercase_ : Dict = ['''basic''', '''bottleneck''']
def __init__( self , a_=3 , a_=64 , a_=[256, 512, 1024, 2048] , a_=[3, 4, 6, 3] , a_="bottleneck" , a_="relu" , a_=False , a_=None , a_=None , **a_ , ) -> Tuple:
super().__init__(**a_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
_UpperCAmelCase = num_channels
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = layer_type
_UpperCAmelCase = hidden_act
_UpperCAmelCase = downsample_in_first_stage
_UpperCAmelCase = ["stem"] + [f"stage{idx}" for idx in range(1 , len(a_ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=a_ , out_indices=a_ , stage_names=self.stage_names )
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = version.parse('''1.11''' )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _a ( self ) -> float:
return 1e-3
| 657 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
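# A plain-Python sketch of the box un-normalization in the LayoutLM branch
# above: LayoutLM-style boxes live on a 0-1000 grid and must be rescaled to
# pixel coordinates (the pipeline does the same with tensors).
def unnormalize_box(bbox, width, height):
    xmin, ymin, xmax, ymax = bbox
    return {
        "xmin": int(width * xmin / 1000),
        "ymin": int(height * ymin / 1000),
        "xmax": int(width * xmax / 1000),
        "ymax": int(height * ymax / 1000),
    }
# unnormalize_box((100, 200, 500, 800), 640, 480)
# -> {'xmin': 64, 'ymin': 96, 'xmax': 320, 'ymax': 384}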
| 657 | 1 |
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__magic_name__ = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__magic_name__ = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
__magic_name__ = '''zero2'''
__magic_name__ = '''zero3'''
__magic_name__ = [ZEROa, ZEROa]
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = parameterized.to_safe_name("_".join(str(UpperCamelCase__ ) for x in param.args ) )
return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
__magic_name__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _lowerCAmelCase ( lowerCamelCase ):
@parameterized.expand(a_ , name_func=a_ )
def _a ( self , a_ , a_ ) -> int:
self.run_and_check(
stage=a_ , model=a_ , distributed=a_ , fpaa=a_ , )
@require_torch_multi_gpu
@parameterized.expand(a_ , name_func=a_ )
def _a ( self , a_ , a_ ) -> str:
self.run_and_check(
stage=a_ , model=a_ , distributed=a_ , fpaa=a_ , )
@parameterized.expand(a_ , name_func=a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
self.run_and_check(
stage=a_ , model=a_ , distributed=a_ , fpaa=a_ , )
@require_torch_multi_gpu
@parameterized.expand(a_ , name_func=a_ )
def _a ( self , a_ , a_ ) -> Any:
self.run_and_check(
stage=a_ , model=a_ , distributed=a_ , fpaa=a_ , )
def _a ( self , a_ ) -> List[Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def _a ( self , a_ , a_ , a_ = 10 , a_ = True , a_ = True , a_ = True , ) -> Union[str, Any]:
_UpperCAmelCase = models[model]
_UpperCAmelCase = self.run_trainer(
stage=a_ , model_name=a_ , eval_steps=a_ , num_train_epochs=1 , distributed=a_ , fpaa=a_ , )
self.do_checks(a_ )
return output_dir
def _a ( self , a_ , a_ , a_ = 10 , a_ = 1 , a_ = True , a_ = True , ) -> Dict:
_UpperCAmelCase = self.get_auto_remove_tmp_dir("./xxx" , after=a_ )
_UpperCAmelCase = f"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(a_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_UpperCAmelCase = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
_UpperCAmelCase = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
_UpperCAmelCase = self.get_launcher(a_ )
_UpperCAmelCase = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(a_ , env=self.get_env() )
return output_dir
def _a ( self , a_=False ) -> List[Any]:
# 1. explicitly set --num_nodes=1 just in case these tests end up running on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
_UpperCAmelCase = min(2 , get_gpu_count() ) if distributed else 1
return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
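# The generator-based merge sort above, restated without the obfuscated
# names; an illustrative rewrite with identical behavior.
def merge_sort_clean(collection):
    def merge(left, right):
        result = []
        # repeatedly take the smaller head element
        while left and right:
            result.append((left if left[0] <= right[0] else right).pop(0))
        return result + left + right
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort_clean(collection[:mid]), merge_sort_clean(collection[mid:]))
assert merge_sort_clean([5, 3, 8, 1, 2]) == [1, 2, 3, 5, 8]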
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=14 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> str:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = use_mc_token_ids
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Any:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
if self.use_mc_token_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _a ( self , a_ , a_ , a_ , a_ , a_ , *a_ ) -> Any:
_UpperCAmelCase = CTRLModel(config=a_ )
model.to(a_ )
model.eval()
model(a_ , token_type_ids=a_ , head_mask=a_ )
model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _a ( self , a_ , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = CTRLLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self ) -> str:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = CTRLForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : int = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase_ : List[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase_ : Tuple = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : int = True
lowercase_ : List[Any] = False
lowercase_ : List[str] = False
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _a ( self ) -> Tuple:
_UpperCAmelCase = CTRLModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _a ( self ) -> Optional[Any]:
pass
@slow
def _a ( self ) -> List[str]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CTRLModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _a ( self ) -> Optional[int]:
pass
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(a_ )
_UpperCAmelCase = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=a_ ) # Legal the president is
_UpperCAmelCase = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
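# Note (added for clarity): saving the scheduler state at step num_steps // 2
# and reloading it immediately exercises state_dict() round-tripping; the
# learning rates returned should match an uninterrupted run.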
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 | 1 |
"""simple docstring"""
from __future__ import annotations
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = 0.00
_UpperCAmelCase = 0
for resistor in resistors:
if resistor <= 0:
_UpperCAmelCase = f"Resistor at index {index} has a negative or zero value!"
raise ValueError(UpperCamelCase__ )
first_sum += 1 / float(UpperCamelCase__ )
index += 1
return 1 / first_sum
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = 0.00
_UpperCAmelCase = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_UpperCAmelCase = f"Resistor at index {index} has a negative value!"
raise ValueError(UpperCamelCase__ )
index += 1
return sum_r
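# Illustrative values (added; not part of the original file): two 10-ohm
# resistors give an equivalent resistance of 5.0 in parallel (1 / (1/10 + 1/10))
# and 20 in series (10 + 10).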
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 657 | 1 |
"""simple docstring"""
import os
def __lowerCamelCase ( ):
"""simple docstring"""
    with open(os.path.dirname(__file__ ) + "/p022_names.txt" ) as file:
_UpperCAmelCase = str(file.readlines()[0] )
_UpperCAmelCase = names.replace("\"" , "" ).split("," )
names.sort()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
_UpperCAmelCase = 0
return total_score
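# Worked example (from the Project Euler 22 statement): "COLIN" is worth
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in sorted order it
# contributes 938 * 53 = 49714 to the total.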
if __name__ == "__main__":
print(solution())
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
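# Worked note (added): equation(x) = 10 - x * x has roots at +/- sqrt(10)
# ~= +/- 3.1623. bisection(-2, 5) brackets the positive root, since
# equation(-2) = 6 > 0 and equation(5) = -15 < 0, and halves the interval
# until b - a < 0.01.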
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
__magic_name__ = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''tapas'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=1024 , a_=[3, 256, 256, 2, 256, 256, 10] , a_=0.02 , a_=1e-12 , a_=0 , a_=10.0 , a_=0 , a_=1.0 , a_=None , a_=1.0 , a_=False , a_=None , a_=1.0 , a_=1.0 , a_=False , a_=False , a_="ratio" , a_=None , a_=None , a_=64 , a_=32 , a_=False , a_=True , a_=False , a_=False , a_=True , a_=False , a_=None , a_=None , **a_ , ) -> str:
super().__init__(pad_token_id=a_ , **a_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_sizes
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
# Fine-tuning task hyperparameters
_UpperCAmelCase = positive_label_weight
_UpperCAmelCase = num_aggregation_labels
_UpperCAmelCase = aggregation_loss_weight
_UpperCAmelCase = use_answer_as_supervision
_UpperCAmelCase = answer_loss_importance
_UpperCAmelCase = use_normalized_answer_loss
_UpperCAmelCase = huber_loss_delta
_UpperCAmelCase = temperature
_UpperCAmelCase = aggregation_temperature
_UpperCAmelCase = use_gumbel_for_cells
_UpperCAmelCase = use_gumbel_for_aggregation
_UpperCAmelCase = average_approximation_function
_UpperCAmelCase = cell_selection_preference
_UpperCAmelCase = answer_loss_cutoff
_UpperCAmelCase = max_num_rows
_UpperCAmelCase = max_num_columns
_UpperCAmelCase = average_logits_per_cell
_UpperCAmelCase = select_one_column
_UpperCAmelCase = allow_empty_column_selection
_UpperCAmelCase = init_cell_selection_weights_to_zero
_UpperCAmelCase = reset_position_index_per_cell
_UpperCAmelCase = disable_per_token_loss
# Aggregation hyperparameters
_UpperCAmelCase = aggregation_labels
_UpperCAmelCase = no_aggregation_label_index
if isinstance(self.aggregation_labels , a_ ):
            _UpperCAmelCase = {int(k ): v for k, v in aggregation_labels.items()}
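# Illustrative instantiation (a sketch added here; it assumes this class
# resolves to the usual TapasConfig):
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)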
| 657 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
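# Note (added): beams are ranked by length-normalized log-probability
# (scores / seq_lengths), so longer continuations are not penalized outright.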
| 657 | 1 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__magic_name__ = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
__magic_name__ = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
__magic_name__ = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
__magic_name__ = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
__magic_name__ = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
__magic_name__ = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
__magic_name__ = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowerCamelCase ( ):
"""simple docstring"""
    _UpperCAmelCase , _UpperCAmelCase = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
_UpperCAmelCase = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
_UpperCAmelCase , _UpperCAmelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowerCamelCase ( UpperCamelCase__ = 100 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(UpperCamelCase__ ))
@pytest.mark.parametrize("hand, expected" , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = PokerHand(UpperCamelCase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ ).compare_with(PokerHand(UpperCamelCase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ ).compare_with(PokerHand(UpperCamelCase__ ) ) == expected
def __lowerCamelCase ( ):
"""simple docstring"""
    _UpperCAmelCase = [PokerHand(hand ) for hand in SORTED_HANDS]
_UpperCAmelCase = poker_hands.copy()
shuffle(UpperCamelCase__ )
_UpperCAmelCase = chain(sorted(UpperCamelCase__ ) )
for index, hand in enumerate(UpperCamelCase__ ):
assert hand == poker_hands[index]
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=UpperCamelCase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = PokerHand("2C 4S AS 3D 5C" )
_UpperCAmelCase = True
_UpperCAmelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = 0
    _UpperCAmelCase = os.path.abspath(os.path.dirname(__file__ ) )
    _UpperCAmelCase = os.path.join(script_dir , "poker_hands.txt" )
    with open(filename ) as file_hand:
for line in file_hand:
_UpperCAmelCase = line[:14].strip()
_UpperCAmelCase = line[15:].strip()
_UpperCAmelCase , _UpperCAmelCase = PokerHand(UpperCamelCase__ ), PokerHand(UpperCamelCase__ )
_UpperCAmelCase = player.compare_with(UpperCamelCase__ )
if output == "Win":
answer += 1
assert answer == 376
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Any = '''Speech2TextFeatureExtractor'''
lowercase_ : str = '''Speech2TextTokenizer'''
def __init__( self , a_ , a_ ) -> Union[str, Any]:
super().__init__(a_ , a_ )
_UpperCAmelCase = self.feature_extractor
_UpperCAmelCase = False
def __call__( self , *a_ , **a_ ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a_ , **a_ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
_UpperCAmelCase = kwargs.pop("raw_speech" )
else:
_UpperCAmelCase = kwargs.pop("audio" , a_ )
_UpperCAmelCase = kwargs.pop("sampling_rate" , a_ )
_UpperCAmelCase = kwargs.pop("text" , a_ )
if len(a_ ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
_UpperCAmelCase = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
if text is not None:
_UpperCAmelCase = self.tokenizer(a_ , **a_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_UpperCAmelCase = encodings["input_ids"]
return inputs
def _a ( self , *a_ , **a_ ) -> Optional[int]:
return self.tokenizer.batch_decode(*a_ , **a_ )
def _a ( self , *a_ , **a_ ) -> Optional[int]:
return self.tokenizer.decode(*a_ , **a_ )
@contextmanager
def _a ( self ) -> str:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.feature_extractor
_UpperCAmelCase = False
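# Illustrative usage (a sketch; the checkpoint name is the standard public one):
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")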
| 657 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__="pt" ):
"""simple docstring"""
_UpperCAmelCase = {"add_prefix_space": True} if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not line.startswith(" " ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=UpperCamelCase__ , padding="max_length" if pad_to_max_length else None , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , ):
"""simple docstring"""
_UpperCAmelCase = input_ids.ne(UpperCamelCase__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
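# Illustrative behavior (added): with pad_token_id = 0, trim_batch on
# [[5, 6, 0], [7, 0, 0]] drops the all-pad third column and keeps
# [[5, 6], [7, 0]].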
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , a_ , a_ , a_ , a_ , a_="train" , a_=None , a_=None , a_=None , a_="" , ) -> List[Any]:
super().__init__()
_UpperCAmelCase = Path(a_ ).joinpath(type_path + ".source" )
_UpperCAmelCase = Path(a_ ).joinpath(type_path + ".target" )
_UpperCAmelCase = self.get_char_lens(self.src_file )
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self ) -> Any:
return len(self.src_lens )
def __getitem__( self , a_ ) -> Dict[str, torch.Tensor]:
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , a_ ).rstrip("\n" )
_UpperCAmelCase = linecache.getline(str(self.tgt_file ) , a_ ).rstrip("\n" )
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
)
        _UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
_UpperCAmelCase = encode_line(a_ , a_ , self.max_source_length , "right" )
_UpperCAmelCase = encode_line(a_ , a_ , self.max_target_length , "right" )
_UpperCAmelCase = source_inputs["input_ids"].squeeze()
_UpperCAmelCase = target_inputs["input_ids"].squeeze()
_UpperCAmelCase = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( a_ ) -> Optional[int]:
        return [len(x ) for x in Path(a_ ).open().readlines()]
def _a ( self , a_ ) -> Dict[str, torch.Tensor]:
_UpperCAmelCase = torch.stack([x["input_ids"] for x in batch] )
_UpperCAmelCase = torch.stack([x["attention_mask"] for x in batch] )
_UpperCAmelCase = torch.stack([x["decoder_input_ids"] for x in batch] )
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(a_ , a_ )
_UpperCAmelCase , _UpperCAmelCase = trim_batch(a_ , a_ , attention_mask=a_ )
_UpperCAmelCase = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
__magic_name__ = getLogger(__name__)
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return list(itertools.chain.from_iterable(UpperCamelCase__ ) )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = get_git_info()
save_json(UpperCamelCase__ , os.path.join(UpperCamelCase__ , "git_log.json" ) )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=4 , **UpperCamelCase__ ):
"""simple docstring"""
with open(UpperCamelCase__ , "w" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ , indent=UpperCamelCase__ , **UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
with open(UpperCamelCase__ ) as f:
return json.load(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = git.Repo(search_parent_directories=UpperCamelCase__ )
_UpperCAmelCase = {
"repo_id": str(UpperCamelCase__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return list(map(UpperCamelCase__ , UpperCamelCase__ ) )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with open(UpperCamelCase__ , "wb" ) as f:
return pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def remove_articles(UpperCamelCase__ ):
return re.sub(r"\b(a|an|the)\b" , " " , UpperCamelCase__ )
def white_space_fix(UpperCamelCase__ ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase__ ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) )
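# Illustrative: normalize_answer("The  Cat!") lowercases, strips punctuation
# and articles, and collapses whitespace, yielding "cat".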
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = normalize_answer(UpperCamelCase__ ).split()
_UpperCAmelCase = normalize_answer(UpperCamelCase__ ).split()
_UpperCAmelCase = Counter(UpperCamelCase__ ) & Counter(UpperCamelCase__ )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(UpperCamelCase__ )
_UpperCAmelCase = 1.0 * num_same / len(UpperCamelCase__ )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
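# Worked example (added): comparing "new york" against "new york city" shares
# 2 tokens, giving precision 2/2 = 1.0, recall 2/3, and F1 = 0.8.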
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return normalize_answer(UpperCamelCase__ ) == normalize_answer(UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
_UpperCAmelCase = 0
for hypo, pred in zip(UpperCamelCase__ , UpperCamelCase__ ):
em += exact_match_score(UpperCamelCase__ , UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
em /= len(UpperCamelCase__ )
return {"em": em}
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return model_prefix.startswith("rag" )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = "dropout_rate"
for p in extra_params:
if getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and not hasattr(UpperCamelCase__ , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(UpperCamelCase__ ) )
delattr(UpperCamelCase__ , UpperCamelCase__ )
continue
_UpperCAmelCase = p if hasattr(UpperCamelCase__ , UpperCamelCase__ ) else equivalent_param[p]
setattr(UpperCamelCase__ , UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
delattr(UpperCamelCase__ , UpperCamelCase__ )
return hparams, config
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
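# Illustrative round trip (added; not in the original file):
#   base16_encode(b"Hello") -> "48656C6C6F"
#   base16_decode("48656C6C6F") -> b"Hello"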
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Any:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(a_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def _a ( self ) -> Any:
_UpperCAmelCase = None
ops.enable_eager_execution_internal()
_UpperCAmelCase = tf.config.list_physical_devices("CPU" )
if len(a_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_UpperCAmelCase = tf.config.list_logical_devices(device_type="CPU" )
_UpperCAmelCase = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_UpperCAmelCase = GradientAccumulator()
_UpperCAmelCase = tf.Variable([4.0, 3.0] )
_UpperCAmelCase , _UpperCAmelCase = create_optimizer(5e-5 , 10 , 5 )
_UpperCAmelCase = tf.Variable([0.0, 0.0] , trainable=a_ )
def accumulate_on_replica(a_ ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(a_ , a_ ):
with strategy.scope():
_UpperCAmelCase = strategy.experimental_local_results(a_ )
local_variables[0].assign(a_ )
local_variables[1].assign(a_ )
strategy.run(a_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(a_ )
def _check_local_values(a_ , a_ ):
_UpperCAmelCase = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , a_ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , a_ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
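# Worked example (added): 6.25 has two fractional digits, so the loop reduces
# 625/100 by their greatest common divisor of 25 and returns (25, 4).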
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : str = IFPipeline
lowercase_ : Any = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
lowercase_ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _a ( self ) -> Any:
return self._get_dummy_components()
def _a ( self , a_ , a_=0 ) -> List[str]:
if str(a_ ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(a_ )
else:
_UpperCAmelCase = torch.Generator(device=a_ ).manual_seed(a_ )
_UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _a ( self ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _a ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self ) -> int:
self._test_save_load_local()
def _a ( self ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _a ( self ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[Any]:
        # text-to-image (base IF pipeline)
_UpperCAmelCase = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
_UpperCAmelCase = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=a_ , tokenizer=a_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
_UpperCAmelCase , _UpperCAmelCase = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_UpperCAmelCase = None
_UpperCAmelCase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(a_ , a_ , a_ , a_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_UpperCAmelCase = IFImgaImgPipeline(**pipe_a.components )
_UpperCAmelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(a_ , a_ , a_ , a_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_UpperCAmelCase = IFInpaintingPipeline(**pipe_a.components )
_UpperCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(a_ , a_ , a_ , a_ )
def _a ( self , a_ , a_ , a_ , a_ ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
_UpperCAmelCase = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a_ , a_ )
def _a ( self , a_ , a_ , a_ , a_ ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
_UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a_ )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
_UpperCAmelCase = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a_ , a_ )
def _a ( self , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
# pipeline 1
_start_torch_memory_measurement()
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a_ )
_UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a_ )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a_ )
_UpperCAmelCase = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a_ , a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
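# Illustrative sketch (not part of the original script): in the f-string
# template above, single braces like {src_lang} are interpolated right away,
# while the doubled braces {{...}} around the BibTeX entry are escaped so that
# literal braces survive in the generated README.md.
_src, _tgt = "en", "de"
_demo = f"pair: {_src}-{_tgt}, literal: @misc{{kasai2020deep}}"
assert _demo == "pair: en-de, literal: @misc{kasai2020deep}"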
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
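# Illustrative sketch (an assumption about the effect of `_LazyModule` above,
# not its actual implementation): replacing the module in `sys.modules` with a
# lazy proxy defers importing heavy submodules such as `modeling_instructblip`
# until the first attribute access. A toy version of the idea via PEP 562:
import importlib

def _lazy_attr(name, module_map={"sqrt": "math"}):
    # Import the owning module on demand, then hand back the attribute.
    if name in module_map:
        return getattr(importlib.import_module(module_map[name]), name)
    raise AttributeError(name)

assert _lazy_attr("sqrt")(16.0) == 4.0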
| 657 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
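# Illustrative sketch (assumed behavior, mirroring the dummy-object pattern
# above): when the backends are missing, `requires_backends` raises a readable
# ImportError at instantiation time rather than an opaque failure at import
# time. A minimal stand-in for that behavior:
class _ToyDummy:
    _backends = ["torch", "torchsde"]

    def __init__(self):
        # Pretend none of the required backends are installed.
        raise ImportError(f"This object requires the {self._backends} backends.")

try:
    _ToyDummy()
except ImportError as err:
    print(err)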
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
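# Illustrative sketch (not from the original file): each try/except
# OptionalDependencyNotAvailable block above simply probes one optional
# backend and registers its exports only when the probe succeeds. The same
# guard pattern expressed with a plain ImportError:
_optional_exports = []
try:
    import sentencepiece  # noqa: F401 -- may or may not be installed
except ImportError:
    pass
else:
    _optional_exports.append("Speech2TextTokenizer")
print(_optional_exports)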
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
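# Illustrative usage sketch (assumes the `transformers` package is installed;
# `UperNetConfig` is the public counterpart of the config class above):
# omitting `backbone_config` triggers the logged ResNet fallback, and
# `to_dict()` re-serializes the nested backbone config for JSON round-trips.
from transformers import UperNetConfig

_config = UperNetConfig()  # falls back to the default ResNet backbone
assert _config.to_dict()["backbone_config"]["model_type"] == "resnet"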
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# Compute the profit gained per unit of weight (profit/weight) for each
# item; the greedy choice below always takes the highest remaining ratio.
_UpperCAmelCase = [p / w for p, w in zip(UpperCamelCase__ , UpperCamelCase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
_UpperCAmelCase = sorted(UpperCamelCase__ )
# declaring useful variables
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# loop until the total weight reaches the max limit (e.g. 15 kg) or every item has been considered (i == length)
while limit <= max_weight and i < length:
# pick the largest remaining profit/weight ratio and locate its item
_UpperCAmelCase = sorted_profit_by_weight[length - i - 1]
_UpperCAmelCase = profit_by_weight.index(UpperCamelCase__ )
_UpperCAmelCase = -1
# check whether the whole item fits into the remaining capacity
if max_weight - limit >= weight[index]:
limit += weight[index]
# The item fits entirely, so add its full profit
# (the fraction taken is weight[index]/weight[index] == 1).
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
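# Worked example (a sketch assuming the function above is exported as
# `calc_profit`, as the __main__ block below assumes): items are taken by
# descending profit/weight ratio, with a fraction of the last item that
# does not fit.
#   ratios: 60/10 = 6.0, 100/20 = 5.0, 120/30 = 4.0
#   take item 0 fully (+60), item 1 fully (+100), then 20/30 of item 2 (+80)
assert abs(calc_profit([60, 100, 120], [10, 20, 30], 50) - 240.0) < 1e-9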
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
__magic_name__ = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
__magic_name__ = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
__magic_name__ = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = 2
while True:
_UpperCAmelCase = factor_map.pop(UpperCamelCase__ , UpperCamelCase__ )
if factor:
_UpperCAmelCase = factor + prime
while x in factor_map:
x += factor
_UpperCAmelCase = factor
else:
_UpperCAmelCase = prime
yield prime
prime += 1
def __lowerCamelCase ( UpperCamelCase__ = 1E10 ):
"""simple docstring"""
_UpperCAmelCase = sieve()
_UpperCAmelCase = 1
while True:
_UpperCAmelCase = next(UpperCamelCase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the remainder will be 2.
next(UpperCamelCase__ )
n += 2
if __name__ == "__main__":
print(solution())
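# Worked check (a hedged sketch, not part of the solution): for the n-th prime
# p_n and odd n, the binomial expansion gives
#   ((p_n - 1)**n + (p_n + 1)**n) mod p_n**2 == 2 * n * p_n  (mod p_n**2),
# which is why the search above only visits odd n and compares 2 * prime * n
# against the limit. For example, with the 5th prime p_5 = 11 and n = 5:
assert ((11 - 1) ** 5 + (11 + 1) ** 5) % 11**2 == 2 * 5 * 11  # remainder 110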
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that each bbox is legal (swap coordinates so x0 <= x1 and y0 <= y1)
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
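# Worked check (hedged: 224 and 16 are the assumed image and patch sizes of
# microsoft/layoutlmv3-base): the expected hidden-state shape (1, 199, 768)
# in the integration test above follows the sequence-length formula used by
# the model tester: text tokens + image patches + 1 CLS token.
_text_seq_length, _image_size, _patch_size = 2, 224, 16
assert _text_seq_length + (_image_size // _patch_size) ** 2 + 1 == 199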
| 657 | 1 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__magic_name__ = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''maskformer'''
lowercase_ : Union[str, Any] = {'''hidden_size''': '''mask_feature_size'''}
lowercase_ : Dict = ['''resnet''', '''swin''']
lowercase_ : Optional[Any] = ['''detr''']
def __init__( self , a_ = 256 , a_ = 256 , a_ = 0.1 , a_ = False , a_ = None , a_ = None , a_ = 0.02 , a_ = 1.0 , a_ = 1.0 , a_ = 1.0 , a_ = 20.0 , a_ = None , **a_ , ) -> int:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_UpperCAmelCase = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.pop("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
f"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_UpperCAmelCase = DetrConfig()
else:
# verify that the decoder is supported
_UpperCAmelCase = (
decoder_config.pop("model_type" ) if isinstance(a_ , a_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"Transformer Decoder {decoder_type} not supported, please use one of"
f" {','.join(self.decoders_supported )}" )
if isinstance(a_ , a_ ):
_UpperCAmelCase = CONFIG_MAPPING[decoder_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = decoder_config
# main feature dimension for the model
_UpperCAmelCase = fpn_feature_size
_UpperCAmelCase = mask_feature_size
# initializer
_UpperCAmelCase = init_std
_UpperCAmelCase = init_xavier_std
# Hungarian matcher && loss
_UpperCAmelCase = cross_entropy_weight
_UpperCAmelCase = dice_weight
_UpperCAmelCase = mask_weight
_UpperCAmelCase = use_auxiliary_loss
_UpperCAmelCase = no_object_weight
_UpperCAmelCase = output_auxiliary_logits
_UpperCAmelCase = self.decoder_config.encoder_attention_heads
_UpperCAmelCase = self.decoder_config.num_hidden_layers
super().__init__(**a_ )
@classmethod
def _a ( cls , a_ , a_ , **a_ ) -> Tuple:
return cls(
backbone_config=a_ , decoder_config=a_ , **a_ , )
def _a ( self ) -> Dict[str, any]:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.decoder_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
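# Illustrative usage sketch (assumes `transformers` is installed and that
# `MaskFormerConfig` is the public counterpart of the config class above):
# `from_backbone_and_decoder_configs` composes a Swin backbone with a DETR
# decoder, and `to_dict()` re-serializes both nested configs.
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

_config = MaskFormerConfig.from_backbone_and_decoder_configs(
    backbone_config=SwinConfig(), decoder_config=DetrConfig()
)
assert _config.to_dict()["decoder_config"]["model_type"] == "detr"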
| 657 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# Input without a mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# candidates than there are unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
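# Illustrative usage sketch (assumes `transformers` plus a framework backend
# are installed; the tiny model name is the one used in the tests above): the
# fill-mask pipeline returns one dict per candidate, each carrying `sequence`,
# `score`, `token`, and `token_str` keys.
from transformers import pipeline

_unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for _candidate in _unmasker("My name is <mask>"):
    print(_candidate["token_str"], round(_candidate["score"], 6))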
| 657 | 1 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def _a ( *a_ , **a_ ) -> str:
pass
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : Any = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _a ( self , a_ , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = DepthEstimationPipeline(model=a_ , image_processor=a_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _a ( self , a_ , a_ ) -> Dict:
_UpperCAmelCase = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , a_ )
import datasets
_UpperCAmelCase = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_UpperCAmelCase = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , a_ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def _a ( self ) -> Dict:
pass
@slow
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = "Intel/dpt-large"
_UpperCAmelCase = pipeline("depth-estimation" , model=a_ )
_UpperCAmelCase = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
_UpperCAmelCase = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def _a ( self ) -> Optional[Any]:
# It is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 657 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 657 | 1 |
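The dtype assertions above encode a simple optimization rule: mask-like token columns fit in int8, input_ids in int32, and any value out of range falls back to int64. A minimal standalone sketch of that fallback, assuming only pyarrow (COLUMN_DTYPES and typed_array are illustrative names, not the library's API):

import pyarrow as pa

COLUMN_DTYPES = {"attention_mask": pa.int8(), "special_tokens_mask": pa.int8(),
                 "token_type_ids": pa.int8(), "input_ids": pa.int32()}

def typed_array(values, col):
    # Try the column-specific optimized dtype; fall back to int64 on overflow,
    # mirroring the "not in range" assertion above.
    try:
        return pa.array(values, type=COLUMN_DTYPES.get(col, pa.int64()))
    except (pa.lib.ArrowInvalid, OverflowError):
        return pa.array(values, type=pa.int64())

print(typed_array([1, 2, 3], "attention_mask").type)  # int8
print(typed_array([2 ** 40], "input_ids").type)       # int64 (overflow fallback)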
"""simple docstring"""
from math import pi
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 |
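A quick numeric check of the arc-length formula above: a 90-degree arc of a radius-10 circle is a quarter of the circumference, so arc_length(90, 10) should print roughly 15.708.

from math import pi

quarter = 2 * pi * 10 * (90 / 360)  # same expression the function evaluates
assert abs(quarter - 5 * pi) < 1e-9
print(quarter)  # 15.707963267948966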
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 657 | 1 |
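A simplified sketch of the alignment rule those assertions pin down, assuming plain lists (this illustrates the idea, not the transformers implementation, which also validates its inputs):

def align(stage_names, out_features=None, out_indices=None):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]  # default: last stage
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]  # negatives work too
    return out_features, out_indices

print(align(["a", "b", "c"]))                        # (['c'], [2])
print(align(["a", "b", "c"], out_indices=[-3, -1]))  # (['a', 'c'], [-3, -1])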
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "\n".join(UpperCamelCase__ )
Path(UpperCamelCase__ ).open("w" ).writelines(UpperCamelCase__ )
__magic_name__ = '''patrickvonplaten/t5-tiny-random'''
__magic_name__ = '''sshleifer/bart-tiny-random'''
__magic_name__ = '''sshleifer/tiny-mbart'''
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self , a_ ) -> List[Any]:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
_UpperCAmelCase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
_UpperCAmelCase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(a_ , a_ )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
_UpperCAmelCase = "translation_en_to_de" if model == T5_TINY else "summarization"
_UpperCAmelCase = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
with patch.object(a_ , "argv" , a_ ):
run_generate()
assert Path(a_ ).exists()
# os.remove(Path(output_file_name))
def _a ( self ) -> List[str]:
self.run_eval_tester(a_ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _a ( self , a_ ) -> List[Any]:
self.run_eval_tester(a_ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _a ( self , a_ ) -> List[str]:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
_UpperCAmelCase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
_UpperCAmelCase = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / "scores.json" )
_UpperCAmelCase = str(tmp_dir / "val.target" )
_dump_articles(a_ , text["en"] )
_dump_articles(a_ , text["de"] )
_UpperCAmelCase = "translation_en_to_de" if model == T5_TINY else "summarization"
_UpperCAmelCase = f"\n run_eval_search.py\n {model}\n {str(a_ )}\n {str(a_ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(a_ , "argv" , a_ ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [" num_beams | length_penalty", model, "Best score args"]
_UpperCAmelCase = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(a_ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(a_ ).exists()
os.remove(Path(a_ ) )
| 657 |
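The tests above drive command-line entry points by patching sys.argv instead of spawning a subprocess; the same pattern in isolation (main is a stand-in for run_generate/run_search):

import sys
from unittest.mock import patch

def main():
    print("argv seen by the script:", sys.argv[1:])

with patch.object(sys, "argv", ["run_eval.py", "--num_beams", "2"]):
    main()  # prints: argv seen by the script: ['--num_beams', '2']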
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
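A toy illustration of the deferred-import idea behind the _LazyModule pattern above, using only the standard library (this sketches the concept, not the actual transformers implementation): attribute access triggers the real import, so importing the package stays cheap even when optional backends are heavy.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # {module_name: [attr, ...]}

    def __getattr__(self, attr):
        for module_name, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(module_name)  # imported lazily
                return getattr(module, attr)
        raise AttributeError(attr)

lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # json is only imported on this first access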
"""simple docstring"""
from __future__ import annotations
import requests
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
return requests.get(UpperCamelCase__ ).json()
def __lowerCamelCase ( UpperCamelCase__ = 10 ):
"""simple docstring"""
_UpperCAmelCase = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
_UpperCAmelCase = requests.get(UpperCamelCase__ ).json()[:max_stories]
return [get_hackernews_story(UpperCamelCase__ ) for story_id in story_ids]
def __lowerCamelCase ( UpperCamelCase__ = 10 ):
"""simple docstring"""
_UpperCAmelCase = hackernews_top_stories(UpperCamelCase__ )
return "\n".join("* [{title}]({url})".format(**UpperCamelCase__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 657 |
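The markdown builder above only needs dicts with title and url keys, so it can be exercised on canned data without touching the network:

stories = [
    {"title": "Example story", "url": "https://example.com"},
    {"title": "Another story", "url": "https://example.org"},
]
print("\n".join("* [{title}]({url})".format(**story) for story in stories))
# * [Example story](https://example.com)
# * [Another story](https://example.org)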
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
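The slow/fast parity checks above reduce to a generic pattern; a minimal sketch using AutoTokenizer (an assumption-laden example: it needs network access and sentencepiece installed to fetch moussaKam/mbarthez):

from transformers import AutoTokenizer

slow = AutoTokenizer.from_pretrained("moussaKam/mbarthez", use_fast=False)
fast = AutoTokenizer.from_pretrained("moussaKam/mbarthez", use_fast=True)
text = "I was born in 92000, and this is falsé."
assert slow.tokenize(text) == fast.tokenize(text)  # same split
assert slow.encode(text) == fast.encode(text)      # same ids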
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
__magic_name__ = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
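The digit-by-digit loop above is equivalent to asking whether n squared ends with the decimal digits of n (an automorphic number, for non-negative n); a string-based cross-check:

# 5**2 = 25 and 76**2 = 5776 end in 5 and 76, so both pass; 7**2 = 49 does not.
for n in (5, 6, 7, 25, 76):
    print(n, str(n * n).endswith(str(n)))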
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''markuplm'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=0 , a_=0 , a_=2 , a_=256 , a_=1024 , a_=216 , a_=1001 , a_=32 , a_=50 , a_="absolute" , a_=True , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
# additional properties
_UpperCAmelCase = max_depth
_UpperCAmelCase = max_xpath_tag_unit_embeddings
_UpperCAmelCase = max_xpath_subs_unit_embeddings
_UpperCAmelCase = tag_pad_id
_UpperCAmelCase = subs_pad_id
_UpperCAmelCase = xpath_unit_hidden_size
| 657 |
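The config class above follows the standard PretrainedConfig recipe: record hyperparameters as attributes and forward the special-token ids to the base class. A minimal sketch of that recipe (DemoMarkupConfig is a made-up name for illustration; requires transformers):

from transformers import PretrainedConfig

class DemoMarkupConfig(PretrainedConfig):
    model_type = "demo-markup"

    def __init__(self, hidden_size=768, max_depth=50, tag_pad_id=216, **kwargs):
        super().__init__(**kwargs)  # handles pad/bos/eos token ids, etc.
        self.hidden_size = hidden_size
        self.max_depth = max_depth
        self.tag_pad_id = tag_pad_id

cfg = DemoMarkupConfig(hidden_size=256, pad_token_id=0)
print(cfg.hidden_size, cfg.pad_token_id)  # 256 0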
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            _UpperCAmelCase = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
            _UpperCAmelCase = [self.model.config.id2label[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 657 | 1 |
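The unnormalize closure in the pipeline above undoes LayoutLM-style box normalization, where coordinates live on a 0-1000 grid; the same scaling as a standalone helper:

def unnormalize_box(bbox, width, height):
    # (x0, y0, x1, y1) on a 0-1000 grid -> pixel coordinates
    return [
        width * bbox[0] / 1000,
        height * bbox[1] / 1000,
        width * bbox[2] / 1000,
        height * bbox[3] / 1000,
    ]

print(unnormalize_box([100, 250, 500, 750], width=800, height=600))
# [80.0, 150.0, 400.0, 450.0]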
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 657 |
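The tester above repairs randomly drawn boxes so that x0 <= x1 and y0 <= y1; the coordinate swap it performs is equivalent to this small helper:

def make_box_legal(box):
    x0, y0, x1, y1 = box
    return [min(x0, x1), min(y0, y1), max(x0, x1), max(y0, y1)]

print(make_box_legal([30, 80, 10, 20]))  # [10, 20, 30, 80]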
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 1 |
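The generator-based _merge above interleaves two sorted halves by always yielding the smaller head; the standard library exposes the same interleaving as heapq.merge, which is handy as a sanity check:

import heapq

print(list(heapq.merge([1, 4, 9], [2, 3, 10])))  # [1, 2, 3, 4, 9, 10]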
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 1 |
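The testers above build random batches with an ids_tensor helper; a sketch of the behavior it is assumed to have (random token ids in [0, vocab_size)), not the test-utils implementation itself:

import torch

def ids_tensor(shape, vocab_size):
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

batch = ids_tensor([13, 7], vocab_size=99)  # batch_size=13, seq_length=7
print(batch.shape, int(batch.min()) >= 0, int(batch.max()) < 99)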
"""simple docstring"""
from __future__ import annotations
def __lowerCamelCase ( UpperCamelCase__ ): # This function is recursive
"""simple docstring"""
_UpperCAmelCase = len(UpperCamelCase__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
_UpperCAmelCase = array[0]
_UpperCAmelCase = False
_UpperCAmelCase = 1
_UpperCAmelCase = []
while not is_found and i < array_length:
if array[i] < pivot:
_UpperCAmelCase = True
_UpperCAmelCase = [element for element in array[i:] if element >= array[i]]
_UpperCAmelCase = longest_subsequence(UpperCamelCase__ )
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
_UpperCAmelCase = temp_array
else:
i += 1
_UpperCAmelCase = [element for element in array[1:] if element >= pivot]
_UpperCAmelCase = [pivot, *longest_subsequence(UpperCamelCase__ )]
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
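The recursive routine above searches for the longest non-decreasing subsequence; the classic O(n^2) dynamic-programming formulation gives the length directly and is a useful cross-check:

def longest_nondecreasing_length(a):
    best = [1] * len(a)  # best[i]: longest valid subsequence ending at i
    for i in range(len(a)):
        for j in range(i):
            if a[j] <= a[i]:
                best[i] = max(best[i], best[j] + 1)
    return max(best, default=0)

print(longest_nondecreasing_length([10, 22, 9, 33, 21, 50, 41, 60]))  # 5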
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 657 | 1 |
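The expected learning rates for get_linear_schedule_with_warmup above follow a simple two-phase rule: ramp linearly from 0 to the peak over the warmup steps, then decay linearly back toward 0. A closed-form sketch that reproduces the listed values (assuming 2 warmup steps, 10 total steps, peak lr 10.0):

def linear_schedule(step, warmup=2, total=10, peak=10.0):
    if step < warmup:
        return peak * step / warmup                      # warmup ramp
    return peak * (total - step) / (total - warmup)      # linear decay

print([round(linear_schedule(s), 2) for s in range(10)])
# [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]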
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__magic_name__ = {'''facebook/blenderbot_small-90M''': 5_12}
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
_UpperCAmelCase = set(UpperCamelCase__ )
return pairs
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = VOCAB_FILES_NAMES
lowercase_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , a_ , a_ , a_="__start__" , a_="__end__" , a_="__unk__" , a_="__null__" , **a_ , ) -> int:
super().__init__(unk_token=a_ , bos_token=a_ , eos_token=a_ , pad_token=a_ , **a_ )
with open(a_ , encoding="utf-8" ) as vocab_handle:
_UpperCAmelCase = json.load(a_ )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
with open(a_ , encoding="utf-8" ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split("\n" )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in merges]
_UpperCAmelCase = dict(zip(a_ , range(len(a_ ) ) ) )
_UpperCAmelCase = {}
@property
def _a ( self ) -> int:
return len(self.encoder )
def _a ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self , a_ ) -> str:
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = re.sub("([.,!?()])" , r" \1" , a_ )
_UpperCAmelCase = re.sub("(')" , r" \1 " , a_ )
_UpperCAmelCase = re.sub(r"\s{2,}" , " " , a_ )
if "\n" in token:
_UpperCAmelCase = token.replace("\n" , " __newln__" )
_UpperCAmelCase = token.split(" " )
_UpperCAmelCase = []
for token in tokens:
if not len(a_ ):
continue
_UpperCAmelCase = token.lower()
_UpperCAmelCase = tuple(a_ )
_UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
_UpperCAmelCase = get_pairs(a_ )
if not pairs:
words.append(a_ )
continue
while True:
_UpperCAmelCase = min(a_ , key=lambda a_ : self.bpe_ranks.get(a_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(a_ ):
try:
_UpperCAmelCase = word.index(a_ , a_ )
new_word.extend(word[i:j] )
_UpperCAmelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(a_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(a_ )
_UpperCAmelCase = new_word
if len(a_ ) == 1:
break
else:
_UpperCAmelCase = get_pairs(a_ )
_UpperCAmelCase = "@@ ".join(a_ )
_UpperCAmelCase = word[:-4]
_UpperCAmelCase = word
words.append(a_ )
return " ".join(a_ )
def _a ( self , a_ ) -> List[str]:
_UpperCAmelCase = []
_UpperCAmelCase = re.findall(r"\S+\n?" , a_ )
for token in words:
split_tokens.extend(list(self.bpe(a_ ).split(" " ) ) )
return split_tokens
def _a ( self , a_ ) -> int:
_UpperCAmelCase = token.lower()
return self.encoder.get(a_ , self.encoder.get(self.unk_token ) )
def _a ( self , a_ ) -> str:
return self.decoder.get(a_ , self.unk_token )
def _a ( self , a_ ) -> str:
_UpperCAmelCase = " ".join(a_ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self , a_ , a_ = None ) -> Tuple[str]:
if not os.path.isdir(a_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a_ , ensure_ascii=a_ ) + "\n" )
_UpperCAmelCase = 0
with open(a_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a_ : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
_UpperCAmelCase = token_index
writer.write(" ".join(a_ ) + "\n" )
index += 1
return vocab_file, merge_file
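# Self-contained sketch of the merge loop inside the `bpe` method above (added
# for illustration; names such as `bpe_merge` and `bpe_ranks` are readable
# stand-ins, not part of the original file): repeatedly merge the adjacent
# symbol pair with the lowest merge rank until no ranked pair remains.
def bpe_merge(word, bpe_ranks):
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break  # no remaining pair has a learned merge
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)  # apply the merge
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return word

# ("l", "o") is merged first (rank 0), then ("l", "lo"):
assert bpe_merge("llo", {("l", "o"): 0, ("l", "lo"): 1}) == ("llo",)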
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
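# Typical CLI usage (this parser is also registered as an `accelerate` subcommand):
#   accelerate test                               # verify the current config end to end
#   accelerate test --config_file my_config.yaml  # test a specific config (path is an example)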
| 657 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=UpperCamelCase__ )
_UpperCAmelCase = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=UpperCamelCase__ )
env_command_parser(subparsers=UpperCamelCase__ )
launch_command_parser(subparsers=UpperCamelCase__ )
tpu_command_parser(subparsers=UpperCamelCase__ )
test_command_parser(subparsers=UpperCamelCase__ )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(UpperCamelCase__ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase__ )
if __name__ == "__main__":
main()
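# Typical top-level usage (illustrative): `accelerate config`, `accelerate env`,
# `accelerate launch train.py`, `accelerate tpu`, and `accelerate test` all
# dispatch through the subparsers registered above.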
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
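# Self-contained sketch of the routine above with explicit variable names (an
# assumed de-obfuscation, for readability only): bisection on f(x) = 10 - x^2,
# whose positive root is sqrt(10) ~= 3.1623.
def bisection_sketch(f, a, b, tol=0.01):
    if f(a) * f(b) >= 0:
        raise ValueError("Wrong space!")  # f must change sign on [a, b]
    c = a
    while (b - a) >= tol:
        c = (a + b) / 2  # midpoint of the current interval
        if f(c) == 0.0:
            break  # exact root found
        if f(c) * f(a) < 0:
            b = c  # root lies in [a, c]
        else:
            a = c  # root lies in [c, b]
    return c

assert abs(bisection_sketch(lambda x: 10 - x * x, 0, 6) - 10 ** 0.5) < 0.01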
| 657 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:

# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the CLIP feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
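# Minimal, self-contained beam-search sketch (illustrative only; it is
# independent of the model above and every name here is hypothetical): keep the
# `beam_size` partial sequences with the best average log-probability, the same
# length-normalized ranking the `generate_beam` method applies.
import math

def beam_search(step_log_probs, beam_size=2):
    """step_log_probs: one {token: log_prob} dict per generation step."""
    beams = [([], 0.0)]  # (token sequence, cumulative log-prob)
    for dist in step_log_probs:
        candidates = [
            (seq + [tok], score + lp)
            for seq, score in beams
            for tok, lp in dist.items()
        ]
        candidates.sort(key=lambda c: c[1] / len(c[0]), reverse=True)
        beams = candidates[:beam_size]  # prune to the best `beam_size` beams
    return beams

steps = [
    {"the": math.log(0.6), "a": math.log(0.4)},
    {"cat": math.log(0.9), "dog": math.log(0.1)},
]
best_seq, best_score = beam_search(steps)[0]
assert best_seq == ["the", "cat"]  # log(0.6 * 0.9) is the best total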
| 657 | 1 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = iter(UpperCamelCase__ )
while True:
_UpperCAmelCase = tuple(itertools.islice(UpperCamelCase__ , UpperCamelCase__ ) )
if not chunk:
return
yield chunk
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "".join([c.upper() for c in dirty if c in string.ascii_letters] )
_UpperCAmelCase = ""
if len(UpperCamelCase__ ) < 2:
return dirty
for i in range(len(UpperCamelCase__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCamelCase__ ) & 1:
clean += "X"
return clean
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_UpperCAmelCase = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCamelCase__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCamelCase__ )
return table
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = generate_table(UpperCamelCase__ )
_UpperCAmelCase = prepare_input(UpperCamelCase__ )
_UpperCAmelCase = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
_UpperCAmelCase , _UpperCAmelCase = divmod(table.index(UpperCamelCase__ ) , 5 )
_UpperCAmelCase , _UpperCAmelCase = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = generate_table(UpperCamelCase__ )
_UpperCAmelCase = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
_UpperCAmelCase , _UpperCAmelCase = divmod(table.index(UpperCamelCase__ ) , 5 )
_UpperCAmelCase , _UpperCAmelCase = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
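# Self-contained sketch of the input-preparation step above (readable names
# assumed): keep only letters, uppercase them, split doubled letters with "X",
# and pad to an even length so the text divides into digraphs.
import string

def prepare_input_demo(dirty):
    dirty = "".join(c.upper() for c in dirty if c in string.ascii_letters)
    if len(dirty) < 2:
        return dirty
    clean = ""
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"  # split the doubled letter
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"  # pad to an even number of letters
    return clean

assert prepare_input_demo("Hello, World!") == "HELXLOWORLDX"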
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
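# Minimal sketch of the lazy-import pattern used above (illustrative; this is
# not the real `_LazyModule` from transformers): importing a submodule is
# deferred until one of its exported symbols is first accessed.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = self._symbol_to_module[attr]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)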
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ = 1000 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
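# Worked example of the digit-sum loop above (self-contained restatement with
# readable names, added for illustration):
def digit_sum_of_power_of_two(power):
    n, r = 2**power, 0
    while n:
        r, n = r + n % 10, n // 10  # peel off the last decimal digit
    return r

assert digit_sum_of_power_of_two(15) == 26  # 2**15 = 32768 -> 3+2+7+6+8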
| 657 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
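# Illustrative sketch (an assumption, not from the original file) of how the
# dynamic-axes mapping returned above is consumed by `torch.onnx.export`:
# axis indices are bound to symbolic dimension names so batch size and
# sequence length stay variable in the exported graph.
dynamic_axes = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
    "token_type_ids": {0: "batch", 1: "sequence"},
}
# torch.onnx.export(model, (input_ids, attention_mask, token_type_ids),
#                   "convbert.onnx", input_names=list(dynamic_axes),
#                   dynamic_axes=dynamic_axes)  # export call sketched, not executed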
| 657 | 1 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ ) -> int:
return f"gaussian_noise_s={seed}_shape={'_'.join([str(a_ ) for s in shape] )}.npy"
def _a ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _a ( self , a_=0 , a_=(4, 4, 64, 64) , a_=False ) -> Optional[int]:
_UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(a_ , a_ ) ) , dtype=a_ )
return image
def _a ( self , a_=False , a_="CompVis/stable-diffusion-v1-4" ) -> Tuple:
_UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_UpperCAmelCase = "bf16" if fpaa else None
_UpperCAmelCase , _UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
a_ , subfolder="unet" , dtype=a_ , revision=a_ )
return model, params
def _a ( self , a_=0 , a_=(4, 77, 768) , a_=False ) -> Optional[Any]:
_UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(a_ , a_ ) ) , dtype=a_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _a ( self , a_ , a_ , a_ ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=a_ )
_UpperCAmelCase = self.get_latents(a_ , fpaa=a_ )
_UpperCAmelCase = self.get_encoder_hidden_states(a_ , fpaa=a_ )
_UpperCAmelCase = model.apply(
{"params": params} , a_ , jnp.array(a_ , dtype=jnp.intaa ) , encoder_hidden_states=a_ , ).sample
assert sample.shape == latents.shape
_UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_UpperCAmelCase = jnp.array(a_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(a_ , a_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _a ( self , a_ , a_ , a_ ) -> List[Any]:
_UpperCAmelCase , _UpperCAmelCase = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=a_ )
_UpperCAmelCase = self.get_latents(a_ , shape=(4, 4, 96, 96) , fpaa=a_ )
_UpperCAmelCase = self.get_encoder_hidden_states(a_ , shape=(4, 77, 1024) , fpaa=a_ )
_UpperCAmelCase = model.apply(
{"params": params} , a_ , jnp.array(a_ , dtype=jnp.intaa ) , encoder_hidden_states=a_ , ).sample
assert sample.shape == latents.shape
_UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_UpperCAmelCase = jnp.array(a_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(a_ , a_ , atol=1e-2 )
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
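# Self-contained round-trip example of the two functions above (readable names
# assumed): RFC 3548 base16 is simply uppercase hex, two digits per byte.
def base16_encode(data):
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)

def base16_decode(data):
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))

assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"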
| 657 | 1 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__magic_name__ = '''src/diffusers'''
__magic_name__ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
__magic_name__ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
__magic_name__ = spec.loader.load_module()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return line.startswith(UpperCamelCase__ ) or len(UpperCamelCase__ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , UpperCamelCase__ ) is not None
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = object_name.split("." )
_UpperCAmelCase = 0
# First let's find the module where our object lives.
_UpperCAmelCase = parts[i]
while i < len(UpperCamelCase__ ) and not os.path.isfile(os.path.join(UpperCamelCase__ , f"{module}.py" ) ):
i += 1
if i < len(UpperCamelCase__ ):
_UpperCAmelCase = os.path.join(UpperCamelCase__ , parts[i] )
if i >= len(UpperCamelCase__ ):
raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(UpperCamelCase__ , f"{module}.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase = f.readlines()
# Now let's find the class / func in the code!
_UpperCAmelCase = ""
_UpperCAmelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCamelCase__ ) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCamelCase__ ):
raise ValueError(f" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_UpperCAmelCase = line_index
while line_index < len(UpperCamelCase__ ) and _should_continue(lines[line_index] , UpperCamelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
return "".join(UpperCamelCase__ )
__magic_name__ = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
__magic_name__ = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
__magic_name__ = re.compile(r'''<FILL\s+[^>]*>''')
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = code.split("\n" )
_UpperCAmelCase = 0
while idx < len(UpperCamelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCamelCase__ ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = len(get_indent(UpperCamelCase__ ) ) > 0
if has_indent:
_UpperCAmelCase = f"class Bla:\n{code}"
_UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCamelCase__ )
_UpperCAmelCase = black.format_str(UpperCamelCase__ , mode=UpperCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase = style_docstrings_in_code(UpperCamelCase__ )
return result[len("class Bla:\n" ) :] if has_indent else result
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=False ):
"""simple docstring"""
with open(UpperCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCamelCase__ ):
_UpperCAmelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = search.groups()
_UpperCAmelCase = find_code_in_diffusers(UpperCamelCase__ )
_UpperCAmelCase = get_indent(UpperCamelCase__ )
_UpperCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_UpperCAmelCase = theoretical_indent
_UpperCAmelCase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_UpperCAmelCase = True
while line_index < len(UpperCamelCase__ ) and should_continue:
line_index += 1
if line_index >= len(UpperCamelCase__ ):
break
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _should_continue(UpperCamelCase__ , UpperCamelCase__ ) and re.search(f"^{indent}# End copy" , UpperCamelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
_UpperCAmelCase = "".join(UpperCamelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
_UpperCAmelCase = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(UpperCamelCase__ ) is None]
_UpperCAmelCase = "\n".join(UpperCamelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCamelCase__ ) > 0:
_UpperCAmelCase = replace_pattern.replace("with" , "" ).split("," )
_UpperCAmelCase = [_re_replace_pattern.search(UpperCamelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pattern.groups()
_UpperCAmelCase = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if option.strip() == "all-casing":
_UpperCAmelCase = re.sub(obja.lower() , obja.lower() , UpperCamelCase__ )
_UpperCAmelCase = re.sub(obja.upper() , obja.upper() , UpperCamelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_UpperCAmelCase = blackify(lines[start_index - 1] + theoretical_code )
_UpperCAmelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_UpperCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_UpperCAmelCase = start_index + 1
if overwrite and len(UpperCamelCase__ ) > 0:
# Warn the user a file has been modified.
print(f"Detected changes, rewriting {filename}." )
with open(UpperCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(UpperCamelCase__ )
return diffs
def __lowerCamelCase ( UpperCamelCase__ = False ):
"""simple docstring"""
_UpperCAmelCase = glob.glob(os.path.join(UpperCamelCase__ , "**/*.py" ) , recursive=UpperCamelCase__ )
_UpperCAmelCase = []
for filename in all_files:
_UpperCAmelCase = is_copy_consistent(UpperCamelCase__ , UpperCamelCase__ )
diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(UpperCamelCase__ ) > 0:
_UpperCAmelCase = "\n".join(UpperCamelCase__ )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__magic_name__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
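# Typical invocations, per the comment at the top of this script:
#   python utils/check_copies.py                      # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite files in place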
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
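# Self-contained worked example of the routine above (readable names assumed):
# 6.25 has two fractional digits, so start from 625/100 and divide both sides
# by their greatest common divisor (25), found with the Euclidean algorithm.
def to_fraction(decimal):
    frac_digits = len(str(decimal).split(".")[1])
    numerator, denominator = int(decimal * 10**frac_digits), 10**frac_digits
    a, b = numerator, denominator
    while b:
        a, b = b, a % b  # `a` ends as gcd(numerator, denominator)
    return numerator // a, denominator // a

assert to_fraction(6.25) == (25, 4)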
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if num <= 0:
raise ValueError("Input must be a positive integer" )
_UpperCAmelCase = [True] * (num + 1)
_UpperCAmelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , UpperCamelCase__ ):
_UpperCAmelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
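# Self-contained restatement with readable names (added for illustration), plus
# a worked example: every multiple of each surviving prime p, starting at p*p,
# is crossed out.
def sieve(num):
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [n for n in range(2, num + 1) if primes[n]]

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]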
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self , a_ , a_=7 , a_=3 , a_=30 , a_=400 , a_=True , a_=None , a_=True , a_=[0.5, 0.5, 0.5] , a_=[0.5, 0.5, 0.5] , a_=True , a_=1 / 255 , a_=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_UpperCAmelCase = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
def _a ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self , a_ , a_=False ) -> Optional[int]:
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(a_ , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size["shortest_edge"] * h / w )
_UpperCAmelCase = self.size["shortest_edge"]
elif w > h:
_UpperCAmelCase = self.size["shortest_edge"]
_UpperCAmelCase = int(self.size["shortest_edge"] * w / h )
else:
_UpperCAmelCase = self.size["shortest_edge"]
_UpperCAmelCase = self.size["shortest_edge"]
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(a_ , key=lambda a_ : item[0] )[0]
_UpperCAmelCase = max(a_ , key=lambda a_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Dict = DeformableDetrImageProcessor if is_vision_available() else None
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = DeformableDetrImageProcessingTester(self )
@property
def _a ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "image_mean" ) )
self.assertTrue(hasattr(a_ , "image_std" ) )
self.assertTrue(hasattr(a_ , "do_normalize" ) )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "do_rescale" ) )
self.assertTrue(hasattr(a_ , "do_pad" ) )
self.assertTrue(hasattr(a_ , "size" ) )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , a_ )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a_ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , a_ )
def _a ( self ) -> Optional[Any]:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
_UpperCAmelCase = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> Optional[int]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(a_ , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(a_ , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _a ( self ) -> Dict:
# prepare image and target
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {"image_id": 39769, "annotations": target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=a_ , annotations=a_ , return_tensors="pt" )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a_ )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a_ , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a_ ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a_ )
_UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a_ , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a_ ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a_ ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a_ ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a_ ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a_ ) )
@slow
def _a ( self ) -> Any:
# prepare image, target and masks_path
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
_UpperCAmelCase = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format="coco_panoptic" )
_UpperCAmelCase = image_processing(images=a_ , annotations=a_ , masks_path=a_ , return_tensors="pt" )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a_ )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a_ , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a_ ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a_ )
_UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a_ , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a_ ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a_ ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a_ ) )
# verify masks
_UpperCAmelCase = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , a_ )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a_ ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a_ ) )
| 657 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
| 657 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__magic_name__ = datasets.utils.logging.get_logger(__name__)
__magic_name__ = ['''names''', '''prefix''']
__magic_name__ = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__magic_name__ = ['''encoding_errors''', '''on_bad_lines''']
__magic_name__ = ['''date_format''']
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
lowercase_ : str = ","
lowercase_ : Optional[str] = None
lowercase_ : Optional[Union[int, List[int], str]] = "infer"
lowercase_ : Optional[List[str]] = None
lowercase_ : Optional[List[str]] = None
lowercase_ : Optional[Union[int, str, List[int], List[str]]] = None
lowercase_ : Optional[Union[List[int], List[str]]] = None
lowercase_ : Optional[str] = None
lowercase_ : bool = True
lowercase_ : Optional[Literal["c", "python", "pyarrow"]] = None
lowercase_ : Dict[Union[int, str], Callable[[Any], Any]] = None
lowercase_ : Optional[list] = None
lowercase_ : Optional[list] = None
lowercase_ : bool = False
lowercase_ : Optional[Union[int, List[int]]] = None
lowercase_ : Optional[int] = None
lowercase_ : Optional[Union[str, List[str]]] = None
lowercase_ : bool = True
lowercase_ : bool = True
lowercase_ : bool = False
lowercase_ : bool = True
lowercase_ : Optional[str] = None
lowercase_ : str = "."
lowercase_ : Optional[str] = None
lowercase_ : str = '"'
lowercase_ : int = 0
lowercase_ : Optional[str] = None
lowercase_ : Optional[str] = None
lowercase_ : Optional[str] = None
lowercase_ : Optional[str] = None
lowercase_ : bool = True
lowercase_ : bool = True
lowercase_ : int = 0
lowercase_ : bool = True
lowercase_ : bool = False
lowercase_ : Optional[str] = None
lowercase_ : int = 10_000
lowercase_ : Optional[datasets.Features] = None
lowercase_ : Optional[str] = "strict"
lowercase_ : Literal["error", "warn", "skip"] = "error"
lowercase_ : Optional[str] = None
def _a ( self ) -> str:
if self.delimiter is not None:
_UpperCAmelCase = self.delimiter
if self.column_names is not None:
_UpperCAmelCase = self.column_names
@property
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# Some kwargs must not be passed if they don't have a default value;
# others are deprecated, so we also skip them when they equal the default value.
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
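# Illustrative note (hypothetical names, not part of the source): the pruning
# loop above amounts to "drop any kwarg still equal to its CsvConfig default",
# so pandas never receives deprecated or version-gated parameters:
#
#     defaults = {"names": None, "prefix": None}
#     kwargs = {"names": None, "prefix": "col_", "sep": ","}
#     kwargs = {k: v for k, v in kwargs.items() if defaults.get(k, object()) != v}
#     # -> {"prefix": "col_", "sep": ","}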
class _lowerCAmelCase ( datasets.ArrowBasedBuilder ):
lowercase_ : Tuple = CsvConfig
def _a ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def _a ( self , a_ ) -> Optional[Any]:
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
_UpperCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
_UpperCAmelCase = data_files
if isinstance(a_ , a_ ):
_UpperCAmelCase = [files]
_UpperCAmelCase = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_UpperCAmelCase = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
_UpperCAmelCase = [files]
_UpperCAmelCase = [dl_manager.iter_files(a_ ) for file in files]
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={"files": files} ) )
return splits
def _a ( self , a_ ) -> pa.Table:
if self.config.features is not None:
_UpperCAmelCase = self.config.features.arrow_schema
if all(not require_storage_cast(a_ ) for feature in self.config.features.values() ):
# cheaper cast
_UpperCAmelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=a_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_UpperCAmelCase = table_cast(a_ , a_ )
return pa_table
def _a ( self , a_ ) -> int:
_UpperCAmelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_UpperCAmelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(a_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
_UpperCAmelCase = pd.read_csv(a_ , iterator=a_ , dtype=a_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(a_ ):
_UpperCAmelCase = pa.Table.from_pandas(a_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(a_ )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(a_ )}: {e}" )
raise
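# Minimal usage sketch (hedged; assumes this module is registered as the
# packaged "csv" builder in `datasets`, and the file path is hypothetical):
#
#     import datasets
#     ds = datasets.load_dataset("csv", data_files="data/train.csv", sep=";")
#
# Every CsvConfig field above maps one-to-one onto a `pd.read_csv` keyword, so
# anything pandas accepts can be forwarded through `load_dataset(...)`.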
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
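# Hedged usage note (illustrative; `UperNetConfig` and `to_dict` are the
# upstream names that the renamed class and method above correspond to):
#
#     from transformers import UperNetConfig
#     config = UperNetConfig()   # falls back to a default ResNet backbone
#     d = config.to_dict()       # serializes backbone_config as a nested dict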
| 657 | 1 |
"""simple docstring"""
from __future__ import annotations
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if len(UpperCamelCase__ ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
_UpperCAmelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
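# Illustrative behaviour (hedged; shown with the conventional signature
# `check_polygon(nums)` that this renamed function mirrors):
#
#     check_polygon([3, 4, 5])   # True:  5 < 3 + 4
#     check_polygon([1, 2, 3])   # False: 3 is not strictly < 1 + 2
#
# i.e. a list of side lengths forms a valid polygon iff the longest side is
# strictly shorter than the sum of all the others.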
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
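# Hedged usage note (illustrative): with _LazyModule installed as the package
# module, importing the package itself stays cheap; the heavy framework-specific
# submodules are only imported on first attribute access, e.g.
#
#     from transformers.models.vision_encoder_decoder import VisionEncoderDecoderConfig
#
# Frameworks that are not installed never make it into _import_structure above,
# so their symbols are simply absent and touching them fails at access time.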
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
_UpperCAmelCase,
_UpperCAmelCase,
_UpperCAmelCase,
_UpperCAmelCase,
_UpperCAmelCase,
_UpperCAmelCase,
_UpperCAmelCase,
_UpperCAmelCase,
) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
def _a ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _a ( self ) -> List[str]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(a_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )
_UpperCAmelCase = torch.tensor([[1, 2]] )
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_UpperCAmelCase = model(
input_ids=input_ids.to(a_ ) , bbox=bbox.to(a_ ) , pixel_values=pixel_values.to(a_ ) , )
# verify the logits
_UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
_UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
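# Hedged summary of the integration check above (upstream class names shown):
#
#     processor = LayoutLMv3ImageProcessor(apply_ocr=False)
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(input_ids=..., bbox=..., pixel_values=...)
#
# The sequence length is text_tokens + image_patches + 1 (CLS): with 2 text
# tokens and a 224x224 image at patch size 16, that is 2 + 14 * 14 + 1 = 199,
# which matches the expected (1, 199, 768) hidden-state shape verified above.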
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : str = MODEL_FOR_MASKED_LM_MAPPING
lowercase_ : List[str] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _a ( self ) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _a ( self ) -> str:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _a ( self ) -> int:
_UpperCAmelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(a_ , a_ )
@slow
@require_torch
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a_ )
@slow
@require_tf
def _a ( self ) -> int:
_UpperCAmelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a_ )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a_ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _a ( self ) -> Any:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
@require_tf
def _a ( self ) -> List[Any]:
_UpperCAmelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase = None
_UpperCAmelCase = None
self.run_pipeline_test(a_ , [] )
def _a ( self , a_ , a_ , a_ ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def _a ( self , a_ , a_ ) -> List[str]:
_UpperCAmelCase = fill_masker.tokenizer
_UpperCAmelCase = fill_masker.model
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
with self.assertRaises(a_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a_ ):
fill_masker("This is" )
self.run_test_top_k(a_ , a_ )
self.run_test_targets(a_ , a_ )
self.run_test_top_k_targets(a_ , a_ )
self.fill_mask_with_duplicate_targets_and_top_k(a_ , a_ )
self.fill_mask_with_multiple_masks(a_ , a_ )
def _a ( self , a_ , a_ ) -> Optional[int]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , targets=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a_ )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a_ ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ) == set(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=a_ )
_UpperCAmelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
# Raises with invalid
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""] )
with self.assertRaises(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="" )
def _a ( self , a_ , a_ ) -> str:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ , top_k=2 )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
] , )
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> List[Any]:
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=a_ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCAmelCase = [el["token_str"] for el in sorted(a_ , key=lambda a_ : x["score"] , reverse=a_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a_ ).issubset(a_ ):
_UpperCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=a_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a_ ) , nested_simplify(a_ ) )
def _a ( self , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=a_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# predictions than there are unique targets
self.assertEqual(len(a_ ) , 3 )
def _a ( self , a_ , a_ ) -> Any:
_UpperCAmelCase = FillMaskPipeline(model=a_ , tokenizer=a_ )
_UpperCAmelCase = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a_ , [
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
[
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
{"sequence": ANY(a_ ), "score": ANY(a_ ), "token": ANY(a_ ), "token_str": ANY(a_ )},
],
] , )
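# Minimal illustrative run (checkpoint name taken from the tests above):
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#     unmasker("Paris is the <mask> of France.")
#
# Each prediction is a dict with "sequence", "score", "token" and "token_str";
# a prompt containing N mask tokens returns N such lists, one per mask.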
| 657 | 1 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
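# Compact round-trip distilled from the tests above (only APIs already used in
# this file; a sketch, not an additional test):
#
#     stream = pa.BufferOutputStream()
#     with ArrowWriter(stream=stream) as writer:
#         writer.write({"col_1": "foo", "col_2": 1})
#         num_examples, num_bytes = writer.finalize()
#     table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
#
# finalize() reports how many examples and bytes were flushed, and the buffer is
# a standard Arrow IPC stream readable back through pa.ipc.open_stream.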
| 657 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
def _a ( self ) -> List[str]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Optional[int]:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises(a_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> int:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _a ( self ) -> Dict:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _a ( self ) -> str:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _a ( self ) -> Tuple:
_UpperCAmelCase = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _a ( self ) -> List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
_UpperCAmelCase , _UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , pa.Buffer ) else pa.memory_map(UpperCamelCase__ )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase__ , features=UpperCamelCase__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(UpperCamelCase__ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ , hash_salt="split_name" , check_duplicates=UpperCamelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__ , schema=UpperCamelCase__ , writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase__ , schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase__ , 1 )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if isinstance(lst[0] , UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0] , UpperCamelCase__ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(UpperCamelCase__ , optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# values outside the optimized dtype's range must fall back to the default integer type
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(UpperCamelCase__ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(UpperCamelCase__ , col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase__ , format="png" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(UpperCamelCase__ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase__ )
with open(UpperCamelCase__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase__ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
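# Minimal in-memory round trip (added for illustration), mirroring the tests
# above: write two rows through ArrowWriter and validate with _check_output.
_out = pa.BufferOutputStream()
with ArrowWriter(stream=_out) as _writer:
    _writer.write({"col_1": "foo", "col_2": 1})
    _writer.write({"col_1": "bar", "col_2": 2})
    _writer.finalize()
_check_output(_out.getvalue(), expected_num_chunks=1)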
| 657 | 1 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = "x" , UpperCamelCase__ = 10**-10 , UpperCamelCase__ = 1 , ):
"""simple docstring"""
_UpperCAmelCase = symbols(UpperCamelCase__ )
_UpperCAmelCase = lambdify(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = lambdify(UpperCamelCase__ , diff(UpperCamelCase__ , UpperCamelCase__ ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(UpperCamelCase__ ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(UpperCamelCase__ ) / diff_function(
UpperCamelCase__ )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
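# Illustrative cross-check (added, not part of the original module): the sympy
# routine above iterates x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n).
# A minimal hand-rolled version for f(x) = x**2 - 10 (so f'(x) = 2*x), assuming
# a starting point where the derivative never vanishes:
def _plain_newton(x, steps=20):
    for _ in range(steps):
        x = x - (x * x - 10) / (2 * x)
    return x
assert abs(_plain_newton(3.0) ** 2 - 10) < 1e-9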
| 657 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _a ( self ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _a ( self ) -> int:
_UpperCAmelCase = BackboneMixin()
_UpperCAmelCase = ["a", "b", "c"]
_UpperCAmelCase = ["a", "c"]
_UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
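# Worked example (added): with stage_names ["a", "b", "c"], passing only
# out_indices=[1] should align out_features to ["b"], matching the helper
# behaviour exercised in the tests above.
_features, _indices = get_aligned_output_features_output_indices(None, [1], ["a", "b", "c"])
assert (_features, _indices) == (["b"], [1])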
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ = 1000 ):
"""simple docstring"""
_UpperCAmelCase = -1
_UpperCAmelCase = 0
for a in range(1 , n // 3 ):
# Solving a**2 + b**2 = c**2 and a + b + c = n for b (eliminating c) gives b = (n**2 - 2*a*n) / (2*n - 2*a)
_UpperCAmelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
_UpperCAmelCase = n - a - b
if c * c == (a * a + b * b):
_UpperCAmelCase = a * b * c
if candidate >= product:
_UpperCAmelCase = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
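# Hedged sanity check (added): brute force over all (a, b) pairs for a small
# perimeter and compare with the closed-form search above; perimeter 12 yields
# the 3-4-5 triangle with product 60.
def _brute_force(n):
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best
assert _brute_force(12) == solution(12) == 60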
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
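# Toy sketch (added) of the lazy-import pattern used above: attribute access
# triggers the real import only on first use. This is an illustration, not the
# actual transformers._LazyModule implementation.
import importlib
class _ToyLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self._package)
        return getattr(module, attr)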
| 657 | 1 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
# force an odd kernel size so the filter has a well-defined centre pixel
if (ksize % 2) == 0:
_UpperCAmelCase = ksize + 1
_UpperCAmelCase = np.zeros((ksize, ksize) , dtype=np.floataa )
# compute each kernel value from its rotated (x, y) offsets
for y in range(UpperCamelCase__ ):
for x in range(UpperCamelCase__ ):
# distance from center
_UpperCAmelCase = x - ksize // 2
_UpperCAmelCase = y - ksize // 2
# degree to radiant
_UpperCAmelCase = theta / 180 * np.pi
_UpperCAmelCase = np.cos(_theta )
_UpperCAmelCase = np.sin(_theta )
# get kernel x
_UpperCAmelCase = cos_theta * px + sin_theta * py
# get kernel y
_UpperCAmelCase = -sin_theta * px + cos_theta * py
# fill kernel
_UpperCAmelCase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__magic_name__ = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
__magic_name__ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__magic_name__ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__magic_name__ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__magic_name__ = out / out.max() * 2_55
__magic_name__ = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
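# Quick numeric check (added, against the original implementation; the name
# follows the demo call above): with theta=0 and gamma=0 the kernel depends on
# the x offset only, so every row of the kernel is identical.
_k = gabor_filter_kernel(9, 8, 0, 10, 0, 0)
assert _k.shape == (9, 9)
assert np.allclose(_k, _k[::-1, :])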
| 657 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Tuple = BarthezTokenizer
lowercase_ : List[Any] = BarthezTokenizerFast
lowercase_ : Dict = True
lowercase_ : int = True
def _a ( self ) -> Any:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=a_ )
_UpperCAmelCase = tokenizer
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(a_ ) , 101122 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _a ( self ) -> List[Any]:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
a_ , max_length=len(a_ ) , padding=a_ , truncation=a_ , return_tensors="pt" )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(a_ , a_ )
def _a ( self ) -> str:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(a_ )
_UpperCAmelCase = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(a_ )
_UpperCAmelCase = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
@slow
def _a ( self ) -> Dict:
# fmt: off
_UpperCAmelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=a_ , )
| 657 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__magic_name__ = logging.getLogger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , a_ , a_ , a_ , a_=None ) -> str:
super().__init__(
a_ , question_encoder_tokenizer=a_ , generator_tokenizer=a_ , index=a_ , init_retrieval=a_ , )
_UpperCAmelCase = None
def _a ( self , a_ ) -> int:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_UpperCAmelCase = self._infer_socket_ifname()
# avoid clash with the NCCL port
_UpperCAmelCase = str(distributed_port + 1 )
_UpperCAmelCase = dist.new_group(ranks=a_ , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _a ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def _a ( self , a_ , a_ , a_=torch.floataa ) -> Dict:
_UpperCAmelCase = torch.empty(a_ , dtype=a_ )
dist.scatter(a_ , src=0 , scatter_list=a_ , group=self.process_group )
return target_tensor
def _a ( self ) -> str:
_UpperCAmelCase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_UpperCAmelCase = next((addr for addr in addrs if addr.startswith("e" )) , a_ )
return ifname
def _a ( self , a_ , a_ ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_UpperCAmelCase , _UpperCAmelCase = self._main_retrieve(a_ , a_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(a_ )
# distributed training
_UpperCAmelCase = dist.get_world_size(group=self.process_group )
# gather logic
_UpperCAmelCase = None
if self._is_main():
_UpperCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(a_ )]
dist.gather(torch.tensor(a_ ) , dst=0 , gather_list=a_ , group=self.process_group )
# scatter logic
_UpperCAmelCase = question_hidden_states.shape[0]
_UpperCAmelCase = []
_UpperCAmelCase = []
if self._is_main():
assert len(a_ ) == world_size
_UpperCAmelCase , _UpperCAmelCase = self._main_retrieve(torch.cat(a_ ).numpy() , a_ )
_UpperCAmelCase , _UpperCAmelCase = torch.tensor(a_ ), torch.tensor(a_ )
_UpperCAmelCase = self._chunk_tensor(a_ , a_ )
_UpperCAmelCase = self._chunk_tensor(a_ , a_ )
_UpperCAmelCase = self._scattered(a_ , [n_queries, n_docs] , target_type=torch.intaa )
_UpperCAmelCase = self._scattered(a_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(a_ )
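# Shape illustration (added): the gather/scatter round trip above. Assuming two
# workers that each contribute a (3, 4) batch of question states, the main
# worker concatenates to (6, 4), retrieves, then sends one slice back per rank.
import torch as _torch
_gathered = [_torch.zeros(3, 4), _torch.ones(3, 4)]  # one entry per worker
_stacked = _torch.cat(_gathered)                     # as in torch.cat(...) above
assert _stacked.shape == (6, 4)
_chunks = list(_torch.chunk(_stacked, 2))            # plausible _chunk_tensor behaviour
assert all(c.shape == (3, 4) for c in _chunks)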
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
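# Worked examples (added): the predicate above accepts automorphic numbers,
# i.e. numbers whose square ends in the same digits. 76**2 == 5776 ends in 76;
# 7**2 == 49 does not end in 7.
assert __lowerCamelCase(76) is True
assert __lowerCamelCase(7) is False
assert [n for n in range(1, 100) if __lowerCamelCase(n)] == [1, 5, 6, 25, 76]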
| 657 | 1 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join(sorted(UpperCamelCase__ ) )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return word_by_signature[signature(UpperCamelCase__ )]
__magic_name__ = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
__magic_name__ = sorted({word.strip().lower() for word in data.splitlines()})
__magic_name__ = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__magic_name__ = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
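# Small illustration (added): two words are anagrams exactly when their
# sorted-letter signatures match.
assert signature("listen") == signature("silent") == "eilnst"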
| 657 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
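# Illustration (added): _get_bounding_box above just truncates a 4-element box
# tensor to ints and zips it into the dict shape returned per prediction.
import torch as _torch
_box = _torch.tensor([1.2, 3.7, 10.9, 20.1]).int().tolist()
assert dict(zip(["xmin", "ymin", "xmax", "ymax"], _box)) == {
    "xmin": 1, "ymin": 3, "xmax": 10, "ymax": 20,
}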
| 657 | 1 |
"""simple docstring"""
from math import ceil, sqrt
def __lowerCamelCase ( UpperCamelCase__ = 100_0000 ):
"""simple docstring"""
_UpperCAmelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_UpperCAmelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_UpperCAmelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
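# Cross-check (added): count square laminae by brute force for a small tile
# budget and compare with the closed-form counting above; Project Euler 173
# quotes forty-one laminae for a budget of one hundred tiles.
def _brute(limit):
    count = 0
    for outer in range(3, limit):
        for hole in range(outer - 2, 0, -2):  # same parity as outer, thickness >= 1
            if outer * outer - hole * hole <= limit:
                count += 1
            else:
                break  # smaller holes only cost more tiles
    return count
assert _brute(100) == solution(100) == 41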
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def merge(UpperCamelCase__ , UpperCamelCase__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase__ ) <= 1:
return collection
_UpperCAmelCase = len(UpperCamelCase__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
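# Property checks (added): the implementation above agrees with Python's
# built-in sorted() on edge cases and duplicates.
assert merge_sort([]) == []
assert merge_sort([3, 1, 2, 1]) == [1, 1, 2, 3]
assert merge_sort([5, -2, 0, 9, 9]) == sorted([5, -2, 0, 9, 9])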
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _lowerCAmelCase :
# setable values
lowercase_ : Optional[int] = None
lowercase_ : Optional[jnp.ndarray] = None
lowercase_ : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _a ( cls ) -> str:
return cls()
@dataclass
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
lowercase_ : KarrasVeSchedulerState
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase ):
@property
def _a ( self ) -> Dict:
return True
@register_to_config
def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.007 , a_ = 80 , a_ = 0.05 , a_ = 50 , ) -> Any:
pass
def _a ( self ) -> Dict:
return KarrasVeSchedulerState.create()
def _a ( self , a_ , a_ , a_ = () ) -> KarrasVeSchedulerState:
_UpperCAmelCase = jnp.arange(0 , a_ )[::-1].copy()
_UpperCAmelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )
def _a ( self , a_ , a_ , a_ , a_ , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
_UpperCAmelCase = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
_UpperCAmelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
_UpperCAmelCase = random.split(a_ , num=1 )
_UpperCAmelCase = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
_UpperCAmelCase = sigma + gamma * sigma
_UpperCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
_UpperCAmelCase = sample_hat + sigma_hat * model_output
_UpperCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
_UpperCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
_UpperCAmelCase = sample_prev + sigma_prev * model_output
_UpperCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
_UpperCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _a ( self , a_ , a_ , a_ , a_ ) -> Tuple:
raise NotImplementedError()
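# Hedged usage sketch (added, commented out): one stochastic sampling loop with
# this scheduler. `unet_apply` is a hypothetical model call; the method names
# follow the public FlaxKarrasVeScheduler API (they are all collapsed to `_a`
# in this renamed listing).
# state = scheduler.set_timesteps(scheduler.create_state(), num_inference_steps)
# for t in state.timesteps:
#     sigma = state.schedule[t]
#     sigma_prev = state.schedule[t - 1] if t > 0 else 0
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#     model_output = unet_apply(sample_hat, sigma_hat)
#     sample, _, state = scheduler.step(
#         state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False
#     )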
| 657 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCAmelCase :
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , head_mask=a_ )
_UpperCAmelCase = model(a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> List[Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , a_ , a_ , a_ , a_ , *a_ ) -> Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self , a_ , a_ , a_=False ) -> Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = inputs_dict["labels"]
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a_ , n_embd=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a_ )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a_ )
@slow
def _a ( self ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(a_ )
_UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a_ ) # the president is
_UpperCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(a_ , do_sample=a_ )
self.assertListEqual(output_ids[0].tolist() , a_ )
| 657 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Tuple = '''mgp-str'''
def __init__( self , a_=[32, 128] , a_=4 , a_=3 , a_=27 , a_=38 , a_=50257 , a_=30522 , a_=768 , a_=12 , a_=12 , a_=4.0 , a_=True , a_=False , a_=1e-5 , a_=0.0 , a_=0.0 , a_=0.0 , a_=False , a_=0.02 , **a_ , ) -> Union[str, Any]:
super().__init__(**a_ )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = max_token_length
_UpperCAmelCase = num_character_labels
_UpperCAmelCase = num_bpe_labels
_UpperCAmelCase = num_wordpiece_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = distilled
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = drop_rate
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = attn_drop_rate
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = output_aa_attentions
_UpperCAmelCase = initializer_range
| 657 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=10 ):
"""simple docstring"""
_UpperCAmelCase = []
for step in range(UpperCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "schedule.bin" )
torch.save(scheduler.state_dict() , UpperCamelCase__ )
_UpperCAmelCase = torch.load(UpperCamelCase__ )
scheduler.load_state_dict(UpperCamelCase__ )
return lrs
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a ( self , a_ , a_ , a_ ) -> Optional[int]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ )
def _a ( self ) -> str:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _a ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a_ )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a_ , weight_decay=0.0 , relative_step=a_ , scale_parameter=a_ , warmup_init=a_ , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(a_ , a_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
lowercase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase_ : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase_ : Dict = 10
def _a ( self , a_ , a_ , a_ , a_=None ) -> Union[str, Any]:
self.assertEqual(len(a_ ) , len(a_ ) )
for a, b in zip(a_ , a_ ):
self.assertAlmostEqual(a_ , a_ , delta=a_ , msg=a_ )
def _a ( self ) -> List[Any]:
_UpperCAmelCase = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(a_ , self.num_steps )
self.assertListAlmostEqual(
a_ , a_ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
_UpperCAmelCase = scheduler_func(self.optimizer , **a_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a_ ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(a_ , self.num_steps )
self.assertListEqual(a_ , a_ , msg=f"failed for {scheduler_func} in save and reload" )
class _lowerCAmelCase :
def __init__( self , a_ ) -> Union[str, Any]:
_UpperCAmelCase = fn
def __call__( self , *a_ , **a_ ) -> Union[str, Any]:
return self.fn(*a_ , **a_ )
@classmethod
def _a ( self , a_ ) -> Dict:
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
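# Worked example (added): the linear-warmup table above follows
# lr(step) = base * step / warmup during warmup, then a linear decay to zero,
# with base lr 10.0, 2 warmup steps and 10 training steps as in the tests.
_base, _warmup, _total = 10.0, 2, 10
def _linear_lr(step):
    if step < _warmup:
        return _base * step / _warmup
    return _base * max(0.0, (_total - step) / (_total - _warmup))
assert [_linear_lr(s) for s in range(4)] == [0.0, 5.0, 10.0, 8.75]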
| 657 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''transfo-xl'''
lowercase_ : Optional[Any] = ['''mems''']
lowercase_ : Tuple = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a_=267735 , a_=[20000, 40000, 200000] , a_=1024 , a_=1024 , a_=16 , a_=64 , a_=4096 , a_=4 , a_=False , a_=18 , a_=1600 , a_=1000 , a_=True , a_=True , a_=0 , a_=-1 , a_=True , a_=0.1 , a_=0.0 , a_=True , a_="normal" , a_=0.01 , a_=0.01 , a_=0.02 , a_=1e-5 , a_=0 , **a_ , ) -> int:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = []
self.cutoffs.extend(a_ )
if proj_share_all_but_first:
_UpperCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_UpperCAmelCase = [False] + [False] * len(self.cutoffs )
_UpperCAmelCase = d_model
_UpperCAmelCase = d_embed
_UpperCAmelCase = d_head
_UpperCAmelCase = d_inner
_UpperCAmelCase = div_val
_UpperCAmelCase = pre_lnorm
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = mem_len
_UpperCAmelCase = same_length
_UpperCAmelCase = attn_type
_UpperCAmelCase = clamp_len
_UpperCAmelCase = sample_softmax
_UpperCAmelCase = adaptive
_UpperCAmelCase = dropout
_UpperCAmelCase = dropatt
_UpperCAmelCase = untie_r
_UpperCAmelCase = init
_UpperCAmelCase = init_range
_UpperCAmelCase = proj_init_std
_UpperCAmelCase = init_std
_UpperCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=a_ , **a_ )
@property
def _a ( self ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def _a ( self , a_ ) -> str:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
"""simple docstring"""
if subparsers is not None:
_UpperCAmelCase = subparsers.add_parser("test" )
else:
_UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=UpperCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCAmelCase = script_name
else:
_UpperCAmelCase = f"--config_file={args.config_file} {script_name}"
_UpperCAmelCase = ["accelerate-launch"] + test_args.split()
_UpperCAmelCase = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = test_command_parser()
_UpperCAmelCase = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
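# Smoke test of the argument parsing above (added; runs nothing distributed).
# On the command line this is reached as `accelerate test [--config_file ...]`.
_parser = test_command_parser()
_args = _parser.parse_args(["--config_file", "my_config.yaml"])
assert _args.config_file == "my_config.yaml"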
| 657 | 1 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__magic_name__ = data_utils.TransfoXLTokenizer
__magic_name__ = data_utils.TransfoXLCorpus
__magic_name__ = data_utils
__magic_name__ = data_utils
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCamelCase__ , "rb" ) as fp:
_UpperCAmelCase = pickle.load(UpperCamelCase__ , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_UpperCAmelCase = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(f"Save vocabulary to {pytorch_vocab_dump_path}" )
_UpperCAmelCase = corpus.vocab.__dict__
torch.save(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , UpperCamelCase__ )
_UpperCAmelCase = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(f"Save dataset to {pytorch_dataset_dump_path}" )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_UpperCAmelCase = os.path.abspath(UpperCamelCase__ )
_UpperCAmelCase = os.path.abspath(UpperCamelCase__ )
print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_UpperCAmelCase = TransfoXLConfig()
else:
_UpperCAmelCase = TransfoXLConfig.from_json_file(UpperCamelCase__ )
print(f"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TransfoXLLMHeadModel(UpperCamelCase__ )
_UpperCAmelCase = load_tf_weights_in_transfo_xl(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
_UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(f"Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}" )
torch.save(model.state_dict() , UpperCamelCase__ )
print(f"Save configuration file to {os.path.abspath(UpperCamelCase__ )}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
__magic_name__ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
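# A hypothetical direct call to the converter defined above; every path is a
# placeholder, not a real checkpoint. Argument order follows the argparse
# wiring at the bottom: tf_checkpoint_path, transfo_xl_config_file,
# pytorch_dump_folder_path, transfo_xl_dataset_file.
convert_transfo_xl_checkpoint_to_pytorch(
    "/tmp/transfo-xl/model.ckpt",   # TF checkpoint to convert ("" to skip)
    "",                             # "" -> use the default TransfoXLConfig
    "/tmp/transfo-xl-pytorch",      # output folder for weights/config
    "",                             # dataset pickle to convert ("" to skip)
)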
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps: the first branch should narrow the interval from the right (b = c), the second from the left (a = c); the mangled assignment targets below no longer distinguish the two.
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
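# A runnable de-obfuscated sketch of the routine above. Note that the two
# branches must narrow opposite ends of the interval, which the mangled
# assignment targets in the dump no longer show.
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        c = (a + b) / 2            # midpoint of the current interval
        if equation(c) == 0.0:     # exact root found
            break
        if equation(c) * equation(a) < 0:
            b = c                  # sign change in [a, c]: shrink from the right
        else:
            a = c                  # sign change in [c, b]: shrink from the left
    return c


print(bisection(-2, 5))  # ~3.16 (a root of 10 - x^2)
print(bisection(0, 6))   # ~3.16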
| 657 | 1 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1E-12 ):
"""simple docstring"""
_UpperCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(UpperCamelCase__ , axis=1 ) , a_min=UpperCamelCase__ ) ).T
_UpperCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(UpperCamelCase__ , axis=1 ) , a_min=UpperCamelCase__ ) ).T
return jnp.matmul(UpperCamelCase__ , norm_emb_a.T )
class _lowerCAmelCase ( nn.Module ):
lowercase_ : CLIPConfig
lowercase_ : jnp.dtype = jnp.float32
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = FlaxCLIPVisionModule(self.config.vision_config )
_UpperCAmelCase = nn.Dense(self.config.projection_dim , use_bias=a_ , dtype=self.dtype )
_UpperCAmelCase = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
_UpperCAmelCase = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
_UpperCAmelCase = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
_UpperCAmelCase = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self , a_ ) -> Dict:
_UpperCAmelCase = self.vision_model(a_ )[1]
_UpperCAmelCase = self.visual_projection(a_ )
_UpperCAmelCase = jax_cosine_distance(a_ , self.special_care_embeds )
_UpperCAmelCase = jax_cosine_distance(a_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
_UpperCAmelCase = 0.0
_UpperCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
_UpperCAmelCase = jnp.round(a_ , 3 )
_UpperCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=a_ )
# Use a lower threshold if an image has any special care concept
_UpperCAmelCase = is_special_care * 0.01
_UpperCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
_UpperCAmelCase = jnp.round(a_ , 3 )
_UpperCAmelCase = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : str = CLIPConfig
lowercase_ : List[str] = '''clip_input'''
lowercase_ : Any = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , a_ , a_ = None , a_ = 0 , a_ = jnp.float32 , a_ = True , **a_ , ) -> Any:
if input_shape is None:
_UpperCAmelCase = (1, 224, 224, 3)
_UpperCAmelCase = self.module_class(config=a_ , dtype=a_ , **a_ )
super().__init__(a_ , a_ , input_shape=a_ , seed=a_ , dtype=a_ , _do_init=_do_init )
def _a ( self , a_ , a_ , a_ = None ) -> FrozenDict:
# init input tensor
_UpperCAmelCase = jax.random.normal(a_ , a_ )
_UpperCAmelCase , _UpperCAmelCase = jax.random.split(a_ )
_UpperCAmelCase = {"params": params_rng, "dropout": dropout_rng}
_UpperCAmelCase = self.module.init(a_ , a_ )["params"]
return random_params
def __call__( self , a_ , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = jnp.transpose(a_ , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(a_ , dtype=jnp.floataa ) , rngs={} , )
| 657 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
lowercase_ : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , a_ , a_ , a_ = None , a_ = 50257 , a_ = 1024 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = None , a_ = "gelu_new" , a_ = 0.1 , a_ = 0.1 , a_ = 0.1 , a_ = 1e-5 , a_ = 0.02 , a_ = True , a_ = True , a_ = False , a_ = False , ) -> List[str]:
super().__init__()
_UpperCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_UpperCAmelCase = prefix_inner_dim
_UpperCAmelCase = prefix_hidden_dim
_UpperCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_UpperCAmelCase = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_UpperCAmelCase = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
_UpperCAmelCase = GPTaLMHeadModel(a_ )
def _a ( self , a_ , a_ , a_ = None , a_ = None , ) -> Tuple:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
_UpperCAmelCase = self.encode_prefix(a_ )
_UpperCAmelCase = self.decode_prefix(a_ )
_UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_UpperCAmelCase = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _a ( self , a_ , a_ ) -> torch.Tensor:
return torch.zeros(a_ , self.prefix_length , dtype=torch.int64 , device=a_ )
def _a ( self , a_ ) -> Union[str, Any]:
return self.encode_prefix(a_ )
@torch.no_grad()
def _a ( self , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = torch.split(a_ , 1 , dim=0 )
_UpperCAmelCase = []
_UpperCAmelCase = []
for feature in features:
_UpperCAmelCase = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
_UpperCAmelCase , _UpperCAmelCase = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_UpperCAmelCase = torch.stack(a_ )
_UpperCAmelCase = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _a ( self , a_=None , a_=None , a_=None , a_ = 5 , a_ = 67 , a_ = 1.0 , a_ = None , ) -> Optional[Any]:
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = torch.ones(a_ , device=a_ , dtype=torch.int )
_UpperCAmelCase = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
_UpperCAmelCase = input_embeds
else:
_UpperCAmelCase = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
_UpperCAmelCase = self.transformer(inputs_embeds=a_ )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_UpperCAmelCase = logits.softmax(-1 ).log()
if scores is None:
_UpperCAmelCase , _UpperCAmelCase = logits.topk(a_ , -1 )
_UpperCAmelCase = generated.expand(a_ , *generated.shape[1:] )
_UpperCAmelCase , _UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_UpperCAmelCase = next_tokens
else:
_UpperCAmelCase = tokens.expand(a_ , *tokens.shape[1:] )
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_UpperCAmelCase = -float(np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_UpperCAmelCase = scores_sum / seq_lengths[:, None]
_UpperCAmelCase , _UpperCAmelCase = scores_sum_average.view(-1 ).topk(a_ , -1 )
_UpperCAmelCase = next_tokens // scores_sum.shape[1]
_UpperCAmelCase = seq_lengths[next_tokens_source]
_UpperCAmelCase = next_tokens % scores_sum.shape[1]
_UpperCAmelCase = next_tokens.unsqueeze(1 )
_UpperCAmelCase = tokens[next_tokens_source]
_UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
_UpperCAmelCase = generated[next_tokens_source]
_UpperCAmelCase = scores_sum_average * seq_lengths
_UpperCAmelCase = is_stopped[next_tokens_source]
_UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
_UpperCAmelCase = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
_UpperCAmelCase = scores / seq_lengths
_UpperCAmelCase = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
_UpperCAmelCase = [tokens[i] for i in order]
_UpperCAmelCase = torch.stack(a_ , dim=0 )
_UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
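# A compact PyTorch sketch of the prefix-conditioning trick above: project the
# prefix into GPT-2's embedding space and prepend it to the token embeddings
# before the forward pass. Dimensions here are illustrative, not the model's.
import torch
from torch import nn

prefix_inner_dim, n_embd, prefix_length = 512, 768, 10
encode_prefix = nn.Linear(prefix_inner_dim, n_embd)

prefix = torch.randn(2, prefix_length, prefix_inner_dim)  # e.g. CLIP features
token_embeds = torch.randn(2, 20, n_embd)                 # stand-in for wte(input_ids)

prefix_embeds = encode_prefix(prefix)                     # (2, 10, 768)
inputs_embeds = torch.cat((prefix_embeds, token_embeds), dim=1)
print(inputs_embeds.shape)                                # torch.Size([2, 30, 768])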
| 657 | 1 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps: the first branch should narrow the interval from the right (b = c), the second from the left (a = c); the mangled assignment targets below no longer distinguish the two.
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
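# A minimal sketch of the lazy-import pattern that `_LazyModule` implements
# above: a package __init__.py can use PEP 562 module-level __getattr__ to
# defer the heavy framework import until an attribute is first accessed.
# This simplification is an assumption about the mechanism, not its source.
import importlib

_import_structure = {"modeling_gpt_neox": ["GPTNeoXModel", "GPTNeoXForCausalLM"]}


def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __package__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")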
| 657 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( lowerCamelCase , unittest.TestCase ):
lowercase_ : Optional[int] = SpeechTaTokenizer
lowercase_ : Dict = False
lowercase_ : Dict = True
def _a ( self ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = SpeechTaTokenizer(a_ )
_UpperCAmelCase = AddedToken("<mask>" , lstrip=a_ , rstrip=a_ )
_UpperCAmelCase = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self , a_ ) -> int:
_UpperCAmelCase = "this is a test"
_UpperCAmelCase = "this is a test"
return input_text, output_text
def _a ( self , a_ , a_=False , a_=20 , a_=5 ) -> Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(a_ )
_UpperCAmelCase = tokenizer.encode(a_ , add_special_tokens=a_ )
_UpperCAmelCase = tokenizer.decode(a_ , clean_up_tokenization_spaces=a_ )
return text, ids
def _a ( self ) -> List[Any]:
_UpperCAmelCase = "<pad>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def _a ( self ) -> Any:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(a_ ) , 81 )
def _a ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def _a ( self ) -> Tuple:
_UpperCAmelCase = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_UpperCAmelCase = tokenizer.vocab_size
_UpperCAmelCase = len(a_ )
self.assertNotEqual(a_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_UpperCAmelCase = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_UpperCAmelCase = tokenizer.add_tokens(a_ )
_UpperCAmelCase = tokenizer.vocab_size
_UpperCAmelCase = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size + len(a_ ) )
_UpperCAmelCase = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_UpperCAmelCase = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_UpperCAmelCase = tokenizer.add_special_tokens(a_ )
_UpperCAmelCase = tokenizer.vocab_size
_UpperCAmelCase = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size_a + len(a_ ) )
_UpperCAmelCase = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def _a ( self ) -> Tuple:
pass
def _a ( self ) -> str:
pass
def _a ( self ) -> Any:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(a_ , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
_UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_ , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(a_ )
# fmt: off
self.assertListEqual(a_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _a ( self ) -> Dict:
# Use custom sequence because this tokenizer does not handle numbers.
_UpperCAmelCase = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_UpperCAmelCase = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=a_ , )
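# An illustrative round trip through the tokenizer API exercised above; this
# uses the same checkpoint as the integration test and needs access to the Hub.
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
tokens = tokenizer.tokenize("This is a test")   # character-level pieces
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens)
print(ids)
print(tokenizer.convert_ids_to_tokens(ids))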
| 657 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
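# Where a mapping like the one above ends up: `torch.onnx.export` accepts the
# same {input_name: {axis_index: axis_name}} shape as its `dynamic_axes`
# argument. The export call is sketched in a comment since it needs a model.
dynamic_axis = {0: "batch", 1: "sequence"}
dynamic_axes = {
    "input_ids": dynamic_axis,
    "attention_mask": dynamic_axis,
    "token_type_ids": dynamic_axis,
}
# torch.onnx.export(
#     model, (input_ids, attention_mask, token_type_ids), "convbert.onnx",
#     input_names=list(dynamic_axes), dynamic_axes=dynamic_axes,
# )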
| 657 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
__magic_name__ = '''2020.9.26'''
__magic_name__ = '''xcodz-dot, cclaus, dhruvmanila'''
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if not all(isinstance(UpperCamelCase__ , (float, int) ) for val in locals().values() ):
_UpperCAmelCase = f"Input values must either be float or int: {list(locals().values() )}"
raise TypeError(UpperCamelCase__ )
_UpperCAmelCase = ((x * distance) / (z + distance)) * scale
_UpperCAmelCase = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError("Axis must be a str" )
_UpperCAmelCase = locals()
del input_variables["axis"]
if not all(isinstance(UpperCamelCase__ , (float, int) ) for val in input_variables.values() ):
_UpperCAmelCase = (
"Input values except axis must either be float or int: "
f"{list(input_variables.values() )}"
)
raise TypeError(UpperCamelCase__ )
_UpperCAmelCase = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
_UpperCAmelCase = x * math.cos(UpperCamelCase__ ) - y * math.sin(UpperCamelCase__ )
_UpperCAmelCase = y * math.cos(UpperCamelCase__ ) + x * math.sin(UpperCamelCase__ )
_UpperCAmelCase = z
elif axis == "x":
_UpperCAmelCase = y * math.cos(UpperCamelCase__ ) - z * math.sin(UpperCamelCase__ )
_UpperCAmelCase = z * math.cos(UpperCamelCase__ ) + y * math.sin(UpperCamelCase__ )
_UpperCAmelCase = x
elif axis == "y":
_UpperCAmelCase = x * math.cos(UpperCamelCase__ ) - z * math.sin(UpperCamelCase__ )
_UpperCAmelCase = z * math.cos(UpperCamelCase__ ) + x * math.sin(UpperCamelCase__ )
_UpperCAmelCase = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
print(f'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
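# A de-obfuscated, runnable sketch of the two helpers above; the unusual angle
# scaling (angle % 360 / 450 * 180 / pi) is kept exactly as in the source.
import math


def convert_to_2d(x, y, z, scale, distance):
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x, y, z, axis, angle):
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        return (x * math.cos(angle) - y * math.sin(angle),
                y * math.cos(angle) + x * math.sin(angle), z)
    if axis == "x":
        return (x, y * math.cos(angle) - z * math.sin(angle),
                z * math.cos(angle) + y * math.sin(angle))
    if axis == "y":
        return (x * math.cos(angle) - z * math.sin(angle), y,
                z * math.cos(angle) + x * math.sin(angle))
    raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")


print(convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0))
print(rotate(1.0, 2.0, 3.0, "y", 90.0))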
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if (len(UpperCamelCase__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(UpperCamelCase__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
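# The two helpers above mirror the standard library's base16 codec; a quick
# cross-check against base64.b16encode / b16decode:
import base64

data = b"Hello World!"
encoded = "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
assert encoded == base64.b16encode(data).decode()  # '48656C6C6F20576F726C6421'
assert base64.b16decode(encoded) == data
print(encoded)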
| 657 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowerCamelCase )
class _lowerCAmelCase ( lowerCamelCase ):
def __init__( self , *a_ , **a_ ) -> Optional[int]:
super().__init__(*a_ , **a_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self , **a_ ) -> List[str]:
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self , *a_ , **a_ ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*a_ , **a_ )
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = load_image(a_ )
_UpperCAmelCase = torch.IntTensor([[image.height, image.width]] )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
_UpperCAmelCase = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
_UpperCAmelCase = target_size
return inputs
def _a ( self , a_ ) -> Optional[Any]:
_UpperCAmelCase = model_inputs.pop("target_size" )
_UpperCAmelCase = self.model(**a_ )
_UpperCAmelCase = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
_UpperCAmelCase = model_inputs["bbox"]
return model_outputs
def _a ( self , a_ , a_=0.9 ) -> int:
_UpperCAmelCase = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_UpperCAmelCase , _UpperCAmelCase = target_size[0].tolist()
def unnormalize(a_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
_UpperCAmelCase , _UpperCAmelCase = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_UpperCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_UpperCAmelCase = [unnormalize(a_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [dict(zip(a_ , a_ ) ) for vals in zip(scores.tolist() , a_ , a_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_UpperCAmelCase = self.image_processor.post_process_object_detection(a_ , a_ , a_ )
_UpperCAmelCase = raw_annotations[0]
_UpperCAmelCase = raw_annotation["scores"]
_UpperCAmelCase = raw_annotation["labels"]
_UpperCAmelCase = raw_annotation["boxes"]
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = [self.model.config.idalabel[label.item()] for label in labels]
_UpperCAmelCase = [self._get_bounding_box(a_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_UpperCAmelCase = ["score", "label", "box"]
_UpperCAmelCase = [
dict(zip(a_ , a_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _a ( self , a_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
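# How the LayoutLM branch above maps normalized boxes back to pixels:
# LayoutLM-style boxes live on a 0-1000 grid, so each coordinate is scaled by
# image size / 1000. The image size and box below are illustrative.
height, width = 480, 640
bbox = [100, 250, 500, 750]  # (xmin, ymin, xmax, ymax) on the 0-1000 grid
pixel_box = {
    "xmin": int(width * bbox[0] / 1000),
    "ymin": int(height * bbox[1] / 1000),
    "xmax": int(width * bbox[2] / 1000),
    "ymax": int(height * bbox[3] / 1000),
}
print(pixel_box)  # {'xmin': 64, 'ymin': 120, 'xmax': 320, 'ymax': 360}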
| 657 |
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
try:
_UpperCAmelCase = float(UpperCamelCase__ )
except ValueError:
raise ValueError("Please enter a valid number" )
_UpperCAmelCase = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_UpperCAmelCase = len(str(UpperCamelCase__ ).split("." )[1] )
_UpperCAmelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCAmelCase = 10**number_of_frac_digits
_UpperCAmelCase , _UpperCAmelCase = denominator, numerator
while True:
_UpperCAmelCase = dividend % divisor
if remainder == 0:
break
_UpperCAmelCase , _UpperCAmelCase = divisor, remainder
_UpperCAmelCase , _UpperCAmelCase = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(89.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 657 | 1 |
"""simple docstring"""
__magic_name__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__magic_name__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__magic_name__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 657 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_UpperCAmelCase = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_UpperCAmelCase = f"{src_lang}-{tgt_lang}"
_UpperCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , "README.md" )
print(f"Generating {path}" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
__magic_name__ = Path(__file__).resolve().parent.parent.parent
__magic_name__ = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__magic_name__ = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 | 1 |
"""simple docstring"""
import os
import numpy
import onnx
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = a.name
_UpperCAmelCase = b.name
_UpperCAmelCase = ""
_UpperCAmelCase = ""
_UpperCAmelCase = a == b
_UpperCAmelCase = name_a
_UpperCAmelCase = name_b
return res
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = list(model.graph.initializer )
_UpperCAmelCase = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_UpperCAmelCase = inits[i].name
_UpperCAmelCase = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
_UpperCAmelCase = os.path.dirname(UpperCamelCase__ )
_UpperCAmelCase = os.path.basename(UpperCamelCase__ )
_UpperCAmelCase = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
_UpperCAmelCase = list(model.graph.initializer )
_UpperCAmelCase = set()
_UpperCAmelCase = {}
_UpperCAmelCase = []
_UpperCAmelCase = 0
for i in range(len(UpperCamelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCamelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCamelCase__ )
dup_set.add(UpperCamelCase__ )
_UpperCAmelCase = inits[j].data_type
_UpperCAmelCase = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , UpperCamelCase__ )
total_reduced_size += mem_size
_UpperCAmelCase = inits[i].name
_UpperCAmelCase = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCamelCase__ )
else:
_UpperCAmelCase = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
_UpperCAmelCase = sorted(UpperCamelCase__ )
_remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = "optimized_" + model_file_name
_UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
onnx.save(UpperCamelCase__ , UpperCamelCase__ )
return new_model
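# The heart of the deduplication above, in miniature: key each initializer by
# its raw values, keep the first tensor of every group, and record a rename map
# for the duplicates. `numpy_helper.to_array` is onnx's TensorProto-to-numpy
# helper; dtype and shape are folded into the key to avoid byte collisions.
from onnx import numpy_helper


def find_duplicate_initializers(initializers):
    kept = {}        # (dtype, shape, bytes) -> name of the tensor we keep
    rename_map = {}  # duplicate name -> kept name, for _graph_replace_input_with
    for init in initializers:
        array = numpy_helper.to_array(init)
        key = (str(array.dtype), array.shape, array.tobytes())
        if key in kept:
            rename_map[init.name] = kept[key]
        else:
            kept[key] = init.name
    return rename_map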
| 657 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowerCamelCase ):
lowercase_ : Dict = ['''torch''', '''torchsde''']
def __init__( self , *a_ , **a_ ) -> Optional[int]:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> Optional[Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _a ( cls , *a_ , **a_ ) -> List[Any]:
requires_backends(cls , ["torch", "torchsde"] )
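# The pattern above in miniature: a placeholder class that defers the
# "missing backend" error from import time to first use.
class DummyTorchSDEScheduler:
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        raise ImportError(
            f"This class requires the following backends: {self._backends}. "
            "Install them to use it."
        )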
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def _a ( self ) -> int:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__magic_name__ = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 | 1 |